From bc82477f18f3c670b1293c26bd0172b1d7ec4233 Mon Sep 17 00:00:00 2001 From: Samuel Mutel <12967891+smutel@users.noreply.github.com> Date: Thu, 16 Jun 2022 18:54:01 +0200 Subject: [PATCH] enh: Automate the doc generation --- .github/workflows/master.yml | 10 +- docs/data-sources/dcim_platform.md | 22 - docs/data-sources/dcim_site.md | 22 - docs/data-sources/ipam_aggregate.md | 24 - docs/data-sources/ipam_ip_addresses.md | 22 - docs/data-sources/ipam_role.md | 22 - docs/data-sources/ipam_service.md | 29 - docs/data-sources/ipam_vlan.md | 24 - docs/data-sources/ipam_vlan_group.md | 24 - ...json_circuits_circuit_terminations_list.md | 24 - .../json_circuits_circuit_types_list.md | 24 - .../json_circuits_circuits_list.md | 24 - .../json_circuits_provider_networks_list.md | 24 - .../json_circuits_providers_list.md | 24 - docs/data-sources/json_dcim_cables_list.md | 24 - .../json_dcim_console_port_templates_list.md | 24 - .../json_dcim_console_ports_list.md | 24 - ...dcim_console_server_port_templates_list.md | 24 - .../json_dcim_console_server_ports_list.md | 24 - .../json_dcim_device_bay_templates_list.md | 24 - .../json_dcim_device_bays_list.md | 24 - .../json_dcim_device_roles_list.md | 24 - .../json_dcim_device_types_list.md | 24 - docs/data-sources/json_dcim_devices_list.md | 24 - .../json_dcim_front_port_templates_list.md | 24 - .../json_dcim_front_ports_list.md | 24 - .../json_dcim_interface_templates_list.md | 24 - .../data-sources/json_dcim_interfaces_list.md | 24 - .../json_dcim_inventory_items_list.md | 24 - docs/data-sources/json_dcim_locations_list.md | 24 - .../json_dcim_manufacturers_list.md | 24 - docs/data-sources/json_dcim_platforms_list.md | 24 - .../json_dcim_power_feeds_list.md | 24 - .../json_dcim_power_outlet_templates_list.md | 24 - .../json_dcim_power_outlets_list.md | 24 - .../json_dcim_power_panels_list.md | 24 - .../json_dcim_power_port_templates_list.md | 24 - .../json_dcim_power_ports_list.md | 24 - .../json_dcim_rack_reservations_list.md | 
24 - .../data-sources/json_dcim_rack_roles_list.md | 24 - docs/data-sources/json_dcim_racks_list.md | 24 - .../json_dcim_rear_port_templates_list.md | 24 - .../data-sources/json_dcim_rear_ports_list.md | 24 - docs/data-sources/json_dcim_regions_list.md | 24 - .../json_dcim_site_groups_list.md | 24 - docs/data-sources/json_dcim_sites_list.md | 24 - .../json_dcim_virtual_chassis_list.md | 24 - .../json_extras_config_contexts_list.md | 24 - .../json_extras_content_types_list.md | 24 - .../json_extras_custom_fields_list.md | 24 - .../json_extras_custom_links_list.md | 24 - .../json_extras_export_templates_list.md | 24 - .../json_extras_image_attachments_list.md | 24 - .../json_extras_job_results_list.md | 24 - .../json_extras_journal_entries_list.md | 24 - .../json_extras_object_changes_list.md | 24 - docs/data-sources/json_extras_tags_list.md | 24 - .../data-sources/json_extras_webhooks_list.md | 24 - .../data-sources/json_ipam_aggregates_list.md | 24 - docs/data-sources/json_ipam_asns_list.md | 24 - .../json_ipam_fhrp_group_assignments_list.md | 24 - .../json_ipam_fhrp_groups_list.md | 24 - .../json_ipam_ip_addresses_list.md | 24 - docs/data-sources/json_ipam_ip_ranges_list.md | 24 - docs/data-sources/json_ipam_prefixes_list.md | 24 - docs/data-sources/json_ipam_rirs_list.md | 24 - docs/data-sources/json_ipam_roles_list.md | 24 - .../json_ipam_route_targets_list.md | 24 - docs/data-sources/json_ipam_services_list.md | 24 - .../json_ipam_vlan_groups_list.md | 24 - docs/data-sources/json_ipam_vlans_list.md | 24 - docs/data-sources/json_ipam_vrfs_list.md | 24 - .../json_tenancy_contact_assignments_list.md | 24 - .../json_tenancy_contact_groups_list.md | 24 - .../json_tenancy_contact_roles_list.md | 24 - .../json_tenancy_contacts_list.md | 24 - .../json_tenancy_tenant_groups_list.md | 24 - .../data-sources/json_tenancy_tenants_list.md | 24 - docs/data-sources/json_users_groups_list.md | 24 - .../json_users_permissions_list.md | 24 - 
docs/data-sources/json_users_tokens_list.md | 24 - docs/data-sources/json_users_users_list.md | 24 - ...json_virtualization_cluster_groups_list.md | 24 - .../json_virtualization_cluster_types_list.md | 24 - .../json_virtualization_clusters_list.md | 24 - .../json_virtualization_interfaces_list.md | 24 - ...on_virtualization_virtual_machines_list.md | 24 - .../json_wireless_wireless_lan_groups_list.md | 24 - .../json_wireless_wireless_lans_list.md | 24 - .../json_wireless_wireless_links_list.md | 24 - docs/data-sources/tenancy_contact.md | 22 - docs/data-sources/tenancy_contact_group.md | 22 - docs/data-sources/tenancy_contact_role.md | 22 - docs/data-sources/tenancy_tenant.md | 22 - docs/data-sources/tenancy_tenant_group.md | 22 - docs/data-sources/virtualization_cluster.md | 22 - docs/index.md | 52 - docs/resources/ipam_aggregate.md | 91 - docs/resources/ipam_ip_addresses.md | 99 - docs/resources/ipam_prefix.md | 99 - docs/resources/ipam_service.md | 97 - docs/resources/ipam_vlan.md | 99 - docs/resources/ipam_vlan_group.md | 41 - docs/resources/tenancy_contact.md | 98 - docs/resources/tenancy_contact_assignment.md | 37 - docs/resources/tenancy_contact_group.md | 92 - docs/resources/tenancy_contact_role.md | 90 - docs/resources/tenancy_tenant.md | 94 - docs/resources/tenancy_tenant_group.md | 41 - docs/resources/virtualization_interface.md | 93 - docs/resources/virtualization_vm.md | 107 - .../netbox_dcim_platform/data-source.tf | 3 + .../netbox_dcim_site/data-source.tf | 3 + .../netbox_ipam_aggregate/data-source.tf | 4 + .../netbox_ipam_ip_addresses/data-source.tf | 3 + .../netbox_ipam_role/data-source.tf | 3 + .../netbox_ipam_service/data-source.tf | 6 + .../netbox_ipam_vlan/data-source.tf | 4 + .../netbox_ipam_vlan_group/data-source.tf | 3 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + 
.../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../netbox_json_ipam_asns_list/data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../netbox_json_ipam_rirs_list/data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../netbox_json_ipam_vrfs_list/data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + .../data-source.tf | 7 + examples/provider/provider.tf | 26 + .../netbox_ipam_aggregate/resource.tf | 52 + 
.../netbox_ipam_ip_addresses/resource.tf | 51 + .../resources/netbox_ipam_prefix/resource.tf | 55 + .../resources/netbox_ipam_service/resource.tf | 55 + .../resources/netbox_ipam_vlan/resource.tf | 56 + .../netbox_ipam_vlan_group/resource.tf | 9 + .../netbox_tenancy_contact/resource.tf | 56 + .../resource.tf | 7 + .../netbox_tenancy_contact_group/resource.tf | 53 + .../netbox_tenancy_contact_role/resource.tf | 52 + .../netbox_tenancy_tenant/resource.tf | 54 + .../netbox_tenancy_tenant_group/resource.tf | 9 + .../resource.tf | 49 + .../netbox_virtualization_vm/resource.tf | 61 + go.mod | 19 + go.sum | 122 +- main.go | 6 + netbox/data_netbox_dcim_platform.go | 13 +- netbox/data_netbox_dcim_site.go | 9 +- netbox/data_netbox_ipam_aggregate.go | 14 +- netbox/data_netbox_ipam_ip_addresses.go | 9 +- netbox/data_netbox_ipam_role.go | 9 +- netbox/data_netbox_ipam_service.go | 13 +- netbox/data_netbox_ipam_vlan.go | 18 +- netbox/data_netbox_ipam_vlan_group.go | 9 +- ...json_circuits_circuit_terminations_list.go | 65 +- ...netbox_json_circuits_circuit_types_list.go | 65 +- ...data_netbox_json_circuits_circuits_list.go | 65 +- ...ox_json_circuits_provider_networks_list.go | 65 +- ...ata_netbox_json_circuits_providers_list.go | 65 +- netbox/data_netbox_json_dcim_cables_list.go | 65 +- ...x_json_dcim_console_port_templates_list.go | 65 +- ...ata_netbox_json_dcim_console_ports_list.go | 65 +- ...dcim_console_server_port_templates_list.go | 65 +- ...box_json_dcim_console_server_ports_list.go | 65 +- ...box_json_dcim_device_bay_templates_list.go | 65 +- .../data_netbox_json_dcim_device_bays_list.go | 65 +- ...data_netbox_json_dcim_device_roles_list.go | 65 +- ...data_netbox_json_dcim_device_types_list.go | 65 +- netbox/data_netbox_json_dcim_devices_list.go | 65 +- ...box_json_dcim_front_port_templates_list.go | 65 +- .../data_netbox_json_dcim_front_ports_list.go | 65 +- ...tbox_json_dcim_interface_templates_list.go | 65 +- .../data_netbox_json_dcim_interfaces_list.go | 65 +- 
...a_netbox_json_dcim_inventory_items_list.go | 65 +- .../data_netbox_json_dcim_locations_list.go | 65 +- ...ata_netbox_json_dcim_manufacturers_list.go | 65 +- .../data_netbox_json_dcim_platforms_list.go | 65 +- .../data_netbox_json_dcim_power_feeds_list.go | 65 +- ...x_json_dcim_power_outlet_templates_list.go | 65 +- ...ata_netbox_json_dcim_power_outlets_list.go | 65 +- ...data_netbox_json_dcim_power_panels_list.go | 65 +- ...box_json_dcim_power_port_templates_list.go | 65 +- .../data_netbox_json_dcim_power_ports_list.go | 65 +- ...netbox_json_dcim_rack_reservations_list.go | 65 +- .../data_netbox_json_dcim_rack_roles_list.go | 65 +- netbox/data_netbox_json_dcim_racks_list.go | 65 +- ...tbox_json_dcim_rear_port_templates_list.go | 65 +- .../data_netbox_json_dcim_rear_ports_list.go | 65 +- netbox/data_netbox_json_dcim_regions_list.go | 65 +- .../data_netbox_json_dcim_site_groups_list.go | 65 +- netbox/data_netbox_json_dcim_sites_list.go | 65 +- ...a_netbox_json_dcim_virtual_chassis_list.go | 65 +- ...netbox_json_extras_config_contexts_list.go | 65 +- ...a_netbox_json_extras_content_types_list.go | 65 +- ...a_netbox_json_extras_custom_fields_list.go | 65 +- ...ta_netbox_json_extras_custom_links_list.go | 65 +- ...etbox_json_extras_export_templates_list.go | 65 +- ...tbox_json_extras_image_attachments_list.go | 65 +- ...ata_netbox_json_extras_job_results_list.go | 65 +- ...netbox_json_extras_journal_entries_list.go | 65 +- ..._netbox_json_extras_object_changes_list.go | 65 +- netbox/data_netbox_json_extras_tags_list.go | 65 +- .../data_netbox_json_extras_webhooks_list.go | 65 +- .../data_netbox_json_ipam_aggregates_list.go | 65 +- netbox/data_netbox_json_ipam_asns_list.go | 65 +- ...x_json_ipam_fhrp_group_assignments_list.go | 65 +- .../data_netbox_json_ipam_fhrp_groups_list.go | 65 +- ...data_netbox_json_ipam_ip_addresses_list.go | 65 +- .../data_netbox_json_ipam_ip_ranges_list.go | 65 +- netbox/data_netbox_json_ipam_prefixes_list.go | 65 +- 
netbox/data_netbox_json_ipam_rirs_list.go | 65 +- netbox/data_netbox_json_ipam_roles_list.go | 65 +- ...ata_netbox_json_ipam_route_targets_list.go | 65 +- netbox/data_netbox_json_ipam_services_list.go | 65 +- .../data_netbox_json_ipam_vlan_groups_list.go | 65 +- netbox/data_netbox_json_ipam_vlans_list.go | 65 +- netbox/data_netbox_json_ipam_vrfs_list.go | 65 +- ...x_json_tenancy_contact_assignments_list.go | 65 +- ...netbox_json_tenancy_contact_groups_list.go | 65 +- ..._netbox_json_tenancy_contact_roles_list.go | 65 +- .../data_netbox_json_tenancy_contacts_list.go | 65 +- ..._netbox_json_tenancy_tenant_groups_list.go | 65 +- .../data_netbox_json_tenancy_tenants_list.go | 65 +- netbox/data_netbox_json_users_groups_list.go | 65 +- ...data_netbox_json_users_permissions_list.go | 65 +- netbox/data_netbox_json_users_tokens_list.go | 65 +- netbox/data_netbox_json_users_users_list.go | 65 +- ...json_virtualization_cluster_groups_list.go | 65 +- ..._json_virtualization_cluster_types_list.go | 65 +- ...etbox_json_virtualization_clusters_list.go | 65 +- ...box_json_virtualization_interfaces_list.go | 65 +- ...on_virtualization_virtual_machines_list.go | 65 +- ..._json_wireless_wireless_lan_groups_list.go | 65 +- ...netbox_json_wireless_wireless_lans_list.go | 65 +- ...etbox_json_wireless_wireless_links_list.go | 65 +- netbox/provider.go | 10 +- netbox/resource_netbox_ipam_aggregate.go | 47 +- netbox/resource_netbox_ipam_ip_addresses.go | 76 +- netbox/resource_netbox_ipam_prefix.go | 74 +- netbox/resource_netbox_ipam_service.go | 50 +- netbox/resource_netbox_ipam_vlan.go | 67 +- netbox/resource_netbox_ipam_vlan_group.go | 29 +- netbox/resource_netbox_tenancy_contact.go | 58 +- ...ource_netbox_tenancy_contact_assignment.go | 32 +- .../resource_netbox_tenancy_contact_group.go | 49 +- .../resource_netbox_tenancy_contact_role.go | 42 +- netbox/resource_netbox_tenancy_tenant.go | 56 +- .../resource_netbox_tenancy_tenant_group.go | 29 +- 
...esource_netbox_virtualization_interface.go | 73 +- netbox/resource_netbox_virtualization_vm.go | 92 +- templates/index.md.tmpl | 26 + tools/tools.go | 8 + utils/generateJsonDatasources | 26 +- .../Masterminds/goutils/.travis.yml | 18 + .../Masterminds/goutils/CHANGELOG.md | 8 + .../Masterminds/goutils/LICENSE.txt | 202 ++ .../github.com/Masterminds/goutils/README.md | 70 + .../Masterminds/goutils/appveyor.yml | 21 + .../goutils/cryptorandomstringutils.go | 230 ++ .../Masterminds/goutils/randomstringutils.go | 248 +++ .../Masterminds/goutils/stringutils.go | 240 +++ .../Masterminds/goutils/wordutils.go | 357 ++++ .../Masterminds/semver/v3/.gitignore | 1 + .../Masterminds/semver/v3/.golangci.yml | 26 + .../Masterminds/semver/v3/CHANGELOG.md | 194 ++ .../Masterminds/semver/v3/LICENSE.txt | 19 + .../github.com/Masterminds/semver/v3/Makefile | 37 + .../Masterminds/semver/v3/README.md | 244 +++ .../Masterminds/semver/v3/collection.go | 24 + .../Masterminds/semver/v3/constraints.go | 568 +++++ .../github.com/Masterminds/semver/v3/doc.go | 184 ++ .../github.com/Masterminds/semver/v3/fuzz.go | 22 + .../Masterminds/semver/v3/version.go | 606 ++++++ .../Masterminds/sprig/v3/.gitignore | 2 + .../Masterminds/sprig/v3/CHANGELOG.md | 370 ++++ .../Masterminds/sprig/v3/LICENSE.txt | 19 + .../github.com/Masterminds/sprig/v3/Makefile | 9 + .../github.com/Masterminds/sprig/v3/README.md | 101 + .../github.com/Masterminds/sprig/v3/crypto.go | 653 ++++++ .../github.com/Masterminds/sprig/v3/date.go | 152 ++ .../Masterminds/sprig/v3/defaults.go | 163 ++ .../github.com/Masterminds/sprig/v3/dict.go | 174 ++ vendor/github.com/Masterminds/sprig/v3/doc.go | 19 + .../Masterminds/sprig/v3/functions.go | 382 ++++ .../github.com/Masterminds/sprig/v3/list.go | 464 ++++ .../Masterminds/sprig/v3/network.go | 12 + .../Masterminds/sprig/v3/numeric.go | 186 ++ .../Masterminds/sprig/v3/reflect.go | 28 + .../github.com/Masterminds/sprig/v3/regex.go | 83 + .../github.com/Masterminds/sprig/v3/semver.go | 
23 + .../Masterminds/sprig/v3/strings.go | 236 ++ vendor/github.com/Masterminds/sprig/v3/url.go | 66 + vendor/github.com/armon/go-radix/.gitignore | 22 + vendor/github.com/armon/go-radix/.travis.yml | 3 + vendor/github.com/armon/go-radix/LICENSE | 20 + vendor/github.com/armon/go-radix/README.md | 38 + vendor/github.com/armon/go-radix/radix.go | 540 +++++ .../github.com/bgentry/speakeasy/.gitignore | 2 + vendor/github.com/bgentry/speakeasy/LICENSE | 24 + .../bgentry/speakeasy/LICENSE_WINDOWS | 201 ++ vendor/github.com/bgentry/speakeasy/Readme.md | 30 + .../github.com/bgentry/speakeasy/speakeasy.go | 49 + .../bgentry/speakeasy/speakeasy_unix.go | 93 + .../bgentry/speakeasy/speakeasy_windows.go | 41 + vendor/github.com/google/uuid/.travis.yml | 9 + vendor/github.com/google/uuid/CONTRIBUTING.md | 10 + vendor/github.com/google/uuid/CONTRIBUTORS | 9 + vendor/github.com/google/uuid/LICENSE | 27 + vendor/github.com/google/uuid/README.md | 19 + vendor/github.com/google/uuid/dce.go | 80 + vendor/github.com/google/uuid/doc.go | 12 + vendor/github.com/google/uuid/hash.go | 53 + vendor/github.com/google/uuid/marshal.go | 38 + vendor/github.com/google/uuid/node.go | 90 + vendor/github.com/google/uuid/node_js.go | 12 + vendor/github.com/google/uuid/node_net.go | 33 + vendor/github.com/google/uuid/null.go | 118 + vendor/github.com/google/uuid/sql.go | 59 + vendor/github.com/google/uuid/time.go | 123 ++ vendor/github.com/google/uuid/util.go | 43 + vendor/github.com/google/uuid/uuid.go | 294 +++ vendor/github.com/google/uuid/version1.go | 44 + vendor/github.com/google/uuid/version4.go | 76 + .../hashicorp/go-checkpoint/LICENSE | 354 +++ .../hashicorp/go-checkpoint/README.md | 22 + .../hashicorp/go-checkpoint/check.go | 368 ++++ .../hashicorp/go-checkpoint/telemetry.go | 118 + .../hashicorp/go-checkpoint/versions.go | 90 + .../github.com/hashicorp/go-cleanhttp/LICENSE | 363 ++++ .../hashicorp/go-cleanhttp/README.md | 30 + .../hashicorp/go-cleanhttp/cleanhttp.go | 58 + 
.../github.com/hashicorp/go-cleanhttp/doc.go | 20 + .../hashicorp/go-cleanhttp/handlers.go | 48 + .../hashicorp/hc-install/.go-version | 1 + .../hashicorp/hc-install/.goreleaser.yml | 29 + .../github.com/hashicorp/hc-install/LICENSE | 373 ++++ .../github.com/hashicorp/hc-install/README.md | 133 ++ .../hc-install/checkpoint/latest_version.go | 154 ++ .../hashicorp/hc-install/errors/errors.go | 18 + .../hashicorp/hc-install/fs/any_version.go | 95 + .../hashicorp/hc-install/fs/exact_version.go | 95 + .../github.com/hashicorp/hc-install/fs/fs.go | 14 + .../hashicorp/hc-install/fs/fs_unix.go | 45 + .../hashicorp/hc-install/fs/fs_windows.go | 81 + .../hashicorp/hc-install/installer.go | 154 ++ .../internal/build/get_go_version.go | 37 + .../hc-install/internal/build/go_build.go | 123 ++ .../internal/build/go_is_installed.go | 28 + .../internal/build/install_go_version.go | 53 + .../internal/httpclient/httpclient.go | 37 + .../hc-install/internal/pubkey/pubkey.go | 127 ++ .../releasesjson/checksum_downloader.go | 214 ++ .../internal/releasesjson/downloader.go | 179 ++ .../internal/releasesjson/product_version.go | 41 + .../internal/releasesjson/releases.go | 177 ++ .../hashicorp/hc-install/internal/src/src.go | 3 + .../internal/validators/validators.go | 18 + .../hc-install/internal/version/version.go | 9 + .../hashicorp/hc-install/product/consul.go | 55 + .../hashicorp/hc-install/product/product.go | 60 + .../hashicorp/hc-install/product/terraform.go | 55 + .../hc-install/releases/exact_version.go | 147 ++ .../hc-install/releases/latest_version.go | 171 ++ .../hashicorp/hc-install/releases/releases.go | 13 + .../hashicorp/hc-install/releases/versions.go | 82 + .../hashicorp/hc-install/src/src.go | 42 + .../hashicorp/terraform-exec/LICENSE | 373 ++++ .../internal/version/version.go | 9 + .../hashicorp/terraform-exec/tfexec/apply.go | 169 ++ .../hashicorp/terraform-exec/tfexec/cmd.go | 232 ++ .../terraform-exec/tfexec/cmd_default.go | 46 + 
.../terraform-exec/tfexec/cmd_linux.go | 54 + .../terraform-exec/tfexec/destroy.go | 156 ++ .../hashicorp/terraform-exec/tfexec/doc.go | 4 + .../hashicorp/terraform-exec/tfexec/errors.go | 39 + .../terraform-exec/tfexec/exit_errors.go | 331 +++ .../hashicorp/terraform-exec/tfexec/fmt.go | 159 ++ .../terraform-exec/tfexec/force_unlock.go | 50 + .../hashicorp/terraform-exec/tfexec/get.go | 52 + .../hashicorp/terraform-exec/tfexec/graph.go | 85 + .../hashicorp/terraform-exec/tfexec/import.go | 141 ++ .../hashicorp/terraform-exec/tfexec/init.go | 179 ++ .../terraform-exec/tfexec/options.go | 411 ++++ .../hashicorp/terraform-exec/tfexec/output.go | 63 + .../hashicorp/terraform-exec/tfexec/plan.go | 180 ++ .../terraform-exec/tfexec/providers_lock.go | 82 + .../terraform-exec/tfexec/providers_schema.go | 33 + .../terraform-exec/tfexec/refresh.go | 137 ++ .../hashicorp/terraform-exec/tfexec/show.go | 196 ++ .../terraform-exec/tfexec/state_mv.go | 105 + .../terraform-exec/tfexec/state_rm.go | 104 + .../hashicorp/terraform-exec/tfexec/taint.go | 78 + .../terraform-exec/tfexec/terraform.go | 165 ++ .../terraform-exec/tfexec/untaint.go | 78 + .../terraform-exec/tfexec/upgrade012.go | 80 + .../terraform-exec/tfexec/upgrade013.go | 68 + .../terraform-exec/tfexec/validate.go | 44 + .../terraform-exec/tfexec/version.go | 208 ++ .../terraform-exec/tfexec/workspace_delete.go | 81 + .../terraform-exec/tfexec/workspace_list.go | 46 + .../terraform-exec/tfexec/workspace_new.go | 83 + .../terraform-exec/tfexec/workspace_select.go | 10 + .../terraform-exec/tfexec/workspace_show.go | 35 + .../hashicorp/terraform-json/.gitignore | 3 + .../hashicorp/terraform-json/.go-version | 1 + .../hashicorp/terraform-json/LICENSE | 373 ++++ .../hashicorp/terraform-json/Makefile | 21 + .../hashicorp/terraform-json/README.md | 33 + .../hashicorp/terraform-json/action.go | 104 + .../hashicorp/terraform-json/config.go | 194 ++ .../hashicorp/terraform-json/expression.go | 127 ++ 
.../hashicorp/terraform-json/plan.go | 202 ++ .../hashicorp/terraform-json/schemas.go | 281 +++ .../hashicorp/terraform-json/state.go | 206 ++ .../hashicorp/terraform-json/tfjson.go | 9 + .../hashicorp/terraform-json/validate.go | 149 ++ .../hashicorp/terraform-json/version.go | 11 + .../hashicorp/terraform-plugin-docs/LICENSE | 373 ++++ .../cmd/tfplugindocs/main.go | 26 + .../cmd/tfplugindocs/version.go | 7 + .../internal/cmd/generate.go | 109 + .../terraform-plugin-docs/internal/cmd/run.go | 99 + .../internal/cmd/serve.go | 3 + .../internal/cmd/validate.go | 80 + .../internal/mdplain/mdplain.go | 12 + .../internal/mdplain/renderer.go | 218 ++ .../internal/provider/generate.go | 568 +++++ .../internal/provider/template.go | 261 +++ .../internal/provider/util.go | 136 ++ .../internal/provider/validate.go | 266 +++ .../internal/tmplfuncs/tmplfuncs.go | 51 + .../schemamd/behaviors.go | 76 + .../terraform-plugin-docs/schemamd/render.go | 511 +++++ .../schemamd/write_attribute_description.go | 72 + .../schemamd/write_block_type_description.go | 96 + ...write_nested_attribute_type_description.go | 111 + .../schemamd/write_type.go | 62 + vendor/github.com/huandu/xstrings/.gitignore | 24 + vendor/github.com/huandu/xstrings/.travis.yml | 7 + .../huandu/xstrings/CONTRIBUTING.md | 23 + vendor/github.com/huandu/xstrings/LICENSE | 22 + vendor/github.com/huandu/xstrings/README.md | 117 + vendor/github.com/huandu/xstrings/common.go | 21 + vendor/github.com/huandu/xstrings/convert.go | 590 +++++ vendor/github.com/huandu/xstrings/count.go | 120 ++ vendor/github.com/huandu/xstrings/doc.go | 8 + vendor/github.com/huandu/xstrings/format.go | 169 ++ .../github.com/huandu/xstrings/manipulate.go | 216 ++ .../huandu/xstrings/stringbuilder.go | 7 + .../huandu/xstrings/stringbuilder_go110.go | 9 + .../github.com/huandu/xstrings/translate.go | 546 +++++ .../github.com/imdario/mergo/.deepsource.toml | 12 + vendor/github.com/imdario/mergo/.gitignore | 33 + 
vendor/github.com/imdario/mergo/.travis.yml | 12 + .../imdario/mergo/CODE_OF_CONDUCT.md | 46 + vendor/github.com/imdario/mergo/LICENSE | 28 + vendor/github.com/imdario/mergo/README.md | 235 ++ vendor/github.com/imdario/mergo/doc.go | 143 ++ vendor/github.com/imdario/mergo/map.go | 178 ++ vendor/github.com/imdario/mergo/merge.go | 380 ++++ vendor/github.com/imdario/mergo/mergo.go | 78 + vendor/github.com/mitchellh/cli/LICENSE | 354 +++ vendor/github.com/mitchellh/cli/Makefile | 17 + vendor/github.com/mitchellh/cli/README.md | 66 + .../github.com/mitchellh/cli/autocomplete.go | 43 + vendor/github.com/mitchellh/cli/cli.go | 742 +++++++ vendor/github.com/mitchellh/cli/command.go | 67 + .../github.com/mitchellh/cli/command_mock.go | 63 + vendor/github.com/mitchellh/cli/help.go | 79 + vendor/github.com/mitchellh/cli/ui.go | 187 ++ vendor/github.com/mitchellh/cli/ui_colored.go | 73 + .../github.com/mitchellh/cli/ui_concurrent.go | 54 + vendor/github.com/mitchellh/cli/ui_mock.go | 116 + vendor/github.com/mitchellh/cli/ui_writer.go | 18 + vendor/github.com/posener/complete/.gitignore | 4 + .../github.com/posener/complete/.travis.yml | 16 + .../github.com/posener/complete/LICENSE.txt | 21 + vendor/github.com/posener/complete/README.md | 131 ++ vendor/github.com/posener/complete/args.go | 114 + vendor/github.com/posener/complete/cmd/cmd.go | 128 ++ .../posener/complete/cmd/install/bash.go | 37 + .../posener/complete/cmd/install/fish.go | 69 + .../posener/complete/cmd/install/install.go | 148 ++ .../posener/complete/cmd/install/utils.go | 140 ++ .../posener/complete/cmd/install/zsh.go | 44 + vendor/github.com/posener/complete/command.go | 111 + .../github.com/posener/complete/complete.go | 104 + vendor/github.com/posener/complete/doc.go | 110 + .../github.com/posener/complete/goreadme.json | 9 + vendor/github.com/posener/complete/log.go | 22 + vendor/github.com/posener/complete/predict.go | 41 + .../posener/complete/predict_files.go | 174 ++ .../posener/complete/predict_set.go 
| 12 + .../russross/blackfriday/.gitignore | 8 + .../russross/blackfriday/.travis.yml | 18 + .../russross/blackfriday/LICENSE.txt | 28 + .../github.com/russross/blackfriday/README.md | 364 ++++ .../github.com/russross/blackfriday/block.go | 1480 +++++++++++++ vendor/github.com/russross/blackfriday/doc.go | 32 + .../github.com/russross/blackfriday/html.go | 945 ++++++++ .../github.com/russross/blackfriday/inline.go | 1154 ++++++++++ .../github.com/russross/blackfriday/latex.go | 334 +++ .../russross/blackfriday/markdown.go | 943 ++++++++ .../russross/blackfriday/smartypants.go | 430 ++++ .../github.com/shopspring/decimal/.gitignore | 9 + .../github.com/shopspring/decimal/.travis.yml | 19 + .../shopspring/decimal/CHANGELOG.md | 49 + vendor/github.com/shopspring/decimal/LICENSE | 45 + .../github.com/shopspring/decimal/README.md | 130 ++ .../shopspring/decimal/decimal-go.go | 415 ++++ .../github.com/shopspring/decimal/decimal.go | 1904 +++++++++++++++++ .../github.com/shopspring/decimal/rounding.go | 160 ++ vendor/github.com/spf13/cast/.gitignore | 25 + vendor/github.com/spf13/cast/LICENSE | 21 + vendor/github.com/spf13/cast/Makefile | 40 + vendor/github.com/spf13/cast/README.md | 75 + vendor/github.com/spf13/cast/cast.go | 176 ++ vendor/github.com/spf13/cast/caste.go | 1476 +++++++++++++ .../spf13/cast/timeformattype_string.go | 27 + vendor/golang.org/x/crypto/AUTHORS | 3 + vendor/golang.org/x/crypto/CONTRIBUTORS | 3 + vendor/golang.org/x/crypto/LICENSE | 27 + vendor/golang.org/x/crypto/PATENTS | 22 + vendor/golang.org/x/crypto/bcrypt/base64.go | 35 + vendor/golang.org/x/crypto/bcrypt/bcrypt.go | 295 +++ vendor/golang.org/x/crypto/blowfish/block.go | 159 ++ vendor/golang.org/x/crypto/blowfish/cipher.go | 99 + vendor/golang.org/x/crypto/blowfish/const.go | 199 ++ vendor/golang.org/x/crypto/cast5/cast5.go | 533 +++++ .../x/crypto/openpgp/armor/armor.go | 232 ++ .../x/crypto/openpgp/armor/encode.go | 161 ++ .../x/crypto/openpgp/canonical_text.go | 59 + 
.../x/crypto/openpgp/elgamal/elgamal.go | 130 ++ .../x/crypto/openpgp/errors/errors.go | 78 + vendor/golang.org/x/crypto/openpgp/keys.go | 693 ++++++ .../x/crypto/openpgp/packet/compressed.go | 123 ++ .../x/crypto/openpgp/packet/config.go | 91 + .../x/crypto/openpgp/packet/encrypted_key.go | 208 ++ .../x/crypto/openpgp/packet/literal.go | 89 + .../x/crypto/openpgp/packet/ocfb.go | 143 ++ .../openpgp/packet/one_pass_signature.go | 73 + .../x/crypto/openpgp/packet/opaque.go | 162 ++ .../x/crypto/openpgp/packet/packet.go | 590 +++++ .../x/crypto/openpgp/packet/private_key.go | 385 ++++ .../x/crypto/openpgp/packet/public_key.go | 753 +++++++ .../x/crypto/openpgp/packet/public_key_v3.go | 279 +++ .../x/crypto/openpgp/packet/reader.go | 76 + .../x/crypto/openpgp/packet/signature.go | 731 +++++++ .../x/crypto/openpgp/packet/signature_v3.go | 146 ++ .../openpgp/packet/symmetric_key_encrypted.go | 155 ++ .../openpgp/packet/symmetrically_encrypted.go | 290 +++ .../x/crypto/openpgp/packet/userattribute.go | 91 + .../x/crypto/openpgp/packet/userid.go | 160 ++ vendor/golang.org/x/crypto/openpgp/read.go | 448 ++++ vendor/golang.org/x/crypto/openpgp/s2k/s2k.go | 279 +++ vendor/golang.org/x/crypto/openpgp/write.go | 418 ++++ vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go | 77 + vendor/golang.org/x/crypto/scrypt/scrypt.go | 212 ++ vendor/modules.txt | 89 + 631 files changed, 52404 insertions(+), 6357 deletions(-) delete mode 100644 docs/data-sources/dcim_platform.md delete mode 100644 docs/data-sources/dcim_site.md delete mode 100644 docs/data-sources/ipam_aggregate.md delete mode 100644 docs/data-sources/ipam_ip_addresses.md delete mode 100644 docs/data-sources/ipam_role.md delete mode 100644 docs/data-sources/ipam_service.md delete mode 100644 docs/data-sources/ipam_vlan.md delete mode 100644 docs/data-sources/ipam_vlan_group.md delete mode 100644 docs/data-sources/json_circuits_circuit_terminations_list.md delete mode 100644 docs/data-sources/json_circuits_circuit_types_list.md 
delete mode 100644 docs/data-sources/json_circuits_circuits_list.md delete mode 100644 docs/data-sources/json_circuits_provider_networks_list.md delete mode 100644 docs/data-sources/json_circuits_providers_list.md delete mode 100644 docs/data-sources/json_dcim_cables_list.md delete mode 100644 docs/data-sources/json_dcim_console_port_templates_list.md delete mode 100644 docs/data-sources/json_dcim_console_ports_list.md delete mode 100644 docs/data-sources/json_dcim_console_server_port_templates_list.md delete mode 100644 docs/data-sources/json_dcim_console_server_ports_list.md delete mode 100644 docs/data-sources/json_dcim_device_bay_templates_list.md delete mode 100644 docs/data-sources/json_dcim_device_bays_list.md delete mode 100644 docs/data-sources/json_dcim_device_roles_list.md delete mode 100644 docs/data-sources/json_dcim_device_types_list.md delete mode 100644 docs/data-sources/json_dcim_devices_list.md delete mode 100644 docs/data-sources/json_dcim_front_port_templates_list.md delete mode 100644 docs/data-sources/json_dcim_front_ports_list.md delete mode 100644 docs/data-sources/json_dcim_interface_templates_list.md delete mode 100644 docs/data-sources/json_dcim_interfaces_list.md delete mode 100644 docs/data-sources/json_dcim_inventory_items_list.md delete mode 100644 docs/data-sources/json_dcim_locations_list.md delete mode 100644 docs/data-sources/json_dcim_manufacturers_list.md delete mode 100644 docs/data-sources/json_dcim_platforms_list.md delete mode 100644 docs/data-sources/json_dcim_power_feeds_list.md delete mode 100644 docs/data-sources/json_dcim_power_outlet_templates_list.md delete mode 100644 docs/data-sources/json_dcim_power_outlets_list.md delete mode 100644 docs/data-sources/json_dcim_power_panels_list.md delete mode 100644 docs/data-sources/json_dcim_power_port_templates_list.md delete mode 100644 docs/data-sources/json_dcim_power_ports_list.md delete mode 100644 docs/data-sources/json_dcim_rack_reservations_list.md delete mode 100644 
docs/data-sources/json_dcim_rack_roles_list.md delete mode 100644 docs/data-sources/json_dcim_racks_list.md delete mode 100644 docs/data-sources/json_dcim_rear_port_templates_list.md delete mode 100644 docs/data-sources/json_dcim_rear_ports_list.md delete mode 100644 docs/data-sources/json_dcim_regions_list.md delete mode 100644 docs/data-sources/json_dcim_site_groups_list.md delete mode 100644 docs/data-sources/json_dcim_sites_list.md delete mode 100644 docs/data-sources/json_dcim_virtual_chassis_list.md delete mode 100644 docs/data-sources/json_extras_config_contexts_list.md delete mode 100644 docs/data-sources/json_extras_content_types_list.md delete mode 100644 docs/data-sources/json_extras_custom_fields_list.md delete mode 100644 docs/data-sources/json_extras_custom_links_list.md delete mode 100644 docs/data-sources/json_extras_export_templates_list.md delete mode 100644 docs/data-sources/json_extras_image_attachments_list.md delete mode 100644 docs/data-sources/json_extras_job_results_list.md delete mode 100644 docs/data-sources/json_extras_journal_entries_list.md delete mode 100644 docs/data-sources/json_extras_object_changes_list.md delete mode 100644 docs/data-sources/json_extras_tags_list.md delete mode 100644 docs/data-sources/json_extras_webhooks_list.md delete mode 100644 docs/data-sources/json_ipam_aggregates_list.md delete mode 100644 docs/data-sources/json_ipam_asns_list.md delete mode 100644 docs/data-sources/json_ipam_fhrp_group_assignments_list.md delete mode 100644 docs/data-sources/json_ipam_fhrp_groups_list.md delete mode 100644 docs/data-sources/json_ipam_ip_addresses_list.md delete mode 100644 docs/data-sources/json_ipam_ip_ranges_list.md delete mode 100644 docs/data-sources/json_ipam_prefixes_list.md delete mode 100644 docs/data-sources/json_ipam_rirs_list.md delete mode 100644 docs/data-sources/json_ipam_roles_list.md delete mode 100644 docs/data-sources/json_ipam_route_targets_list.md delete mode 100644 
docs/data-sources/json_ipam_services_list.md delete mode 100644 docs/data-sources/json_ipam_vlan_groups_list.md delete mode 100644 docs/data-sources/json_ipam_vlans_list.md delete mode 100644 docs/data-sources/json_ipam_vrfs_list.md delete mode 100644 docs/data-sources/json_tenancy_contact_assignments_list.md delete mode 100644 docs/data-sources/json_tenancy_contact_groups_list.md delete mode 100644 docs/data-sources/json_tenancy_contact_roles_list.md delete mode 100644 docs/data-sources/json_tenancy_contacts_list.md delete mode 100644 docs/data-sources/json_tenancy_tenant_groups_list.md delete mode 100644 docs/data-sources/json_tenancy_tenants_list.md delete mode 100644 docs/data-sources/json_users_groups_list.md delete mode 100644 docs/data-sources/json_users_permissions_list.md delete mode 100644 docs/data-sources/json_users_tokens_list.md delete mode 100644 docs/data-sources/json_users_users_list.md delete mode 100644 docs/data-sources/json_virtualization_cluster_groups_list.md delete mode 100644 docs/data-sources/json_virtualization_cluster_types_list.md delete mode 100644 docs/data-sources/json_virtualization_clusters_list.md delete mode 100644 docs/data-sources/json_virtualization_interfaces_list.md delete mode 100644 docs/data-sources/json_virtualization_virtual_machines_list.md delete mode 100644 docs/data-sources/json_wireless_wireless_lan_groups_list.md delete mode 100644 docs/data-sources/json_wireless_wireless_lans_list.md delete mode 100644 docs/data-sources/json_wireless_wireless_links_list.md delete mode 100644 docs/data-sources/tenancy_contact.md delete mode 100644 docs/data-sources/tenancy_contact_group.md delete mode 100644 docs/data-sources/tenancy_contact_role.md delete mode 100644 docs/data-sources/tenancy_tenant.md delete mode 100644 docs/data-sources/tenancy_tenant_group.md delete mode 100644 docs/data-sources/virtualization_cluster.md delete mode 100644 docs/index.md delete mode 100644 docs/resources/ipam_aggregate.md delete mode 100644 
docs/resources/ipam_ip_addresses.md delete mode 100644 docs/resources/ipam_prefix.md delete mode 100644 docs/resources/ipam_service.md delete mode 100644 docs/resources/ipam_vlan.md delete mode 100644 docs/resources/ipam_vlan_group.md delete mode 100644 docs/resources/tenancy_contact.md delete mode 100644 docs/resources/tenancy_contact_assignment.md delete mode 100644 docs/resources/tenancy_contact_group.md delete mode 100644 docs/resources/tenancy_contact_role.md delete mode 100644 docs/resources/tenancy_tenant.md delete mode 100644 docs/resources/tenancy_tenant_group.md delete mode 100644 docs/resources/virtualization_interface.md delete mode 100644 docs/resources/virtualization_vm.md create mode 100644 examples/data-sources/netbox_dcim_platform/data-source.tf create mode 100644 examples/data-sources/netbox_dcim_site/data-source.tf create mode 100644 examples/data-sources/netbox_ipam_aggregate/data-source.tf create mode 100644 examples/data-sources/netbox_ipam_ip_addresses/data-source.tf create mode 100644 examples/data-sources/netbox_ipam_role/data-source.tf create mode 100644 examples/data-sources/netbox_ipam_service/data-source.tf create mode 100644 examples/data-sources/netbox_ipam_vlan/data-source.tf create mode 100644 examples/data-sources/netbox_ipam_vlan_group/data-source.tf create mode 100644 examples/data-sources/netbox_json_circuits_circuit_terminations_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_circuits_circuit_types_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_circuits_circuits_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_circuits_provider_networks_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_circuits_providers_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_dcim_cables_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_dcim_console_port_templates_list/data-source.tf create mode 100644 
examples/data-sources/netbox_json_dcim_console_ports_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_dcim_console_server_port_templates_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_dcim_console_server_ports_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_dcim_device_bay_templates_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_dcim_device_bays_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_dcim_device_roles_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_dcim_device_types_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_dcim_devices_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_dcim_front_port_templates_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_dcim_front_ports_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_dcim_interface_templates_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_dcim_interfaces_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_dcim_inventory_items_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_dcim_locations_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_dcim_manufacturers_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_dcim_platforms_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_dcim_power_feeds_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_dcim_power_outlet_templates_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_dcim_power_outlets_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_dcim_power_panels_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_dcim_power_port_templates_list/data-source.tf create mode 100644 
examples/data-sources/netbox_json_dcim_power_ports_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_dcim_rack_reservations_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_dcim_rack_roles_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_dcim_racks_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_dcim_rear_port_templates_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_dcim_rear_ports_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_dcim_regions_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_dcim_site_groups_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_dcim_sites_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_dcim_virtual_chassis_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_extras_config_contexts_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_extras_content_types_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_extras_custom_fields_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_extras_custom_links_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_extras_export_templates_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_extras_image_attachments_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_extras_job_results_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_extras_journal_entries_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_extras_object_changes_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_extras_tags_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_extras_webhooks_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_ipam_aggregates_list/data-source.tf 
create mode 100644 examples/data-sources/netbox_json_ipam_asns_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_ipam_fhrp_group_assignments_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_ipam_fhrp_groups_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_ipam_ip_addresses_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_ipam_ip_ranges_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_ipam_prefixes_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_ipam_rirs_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_ipam_roles_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_ipam_route_targets_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_ipam_services_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_ipam_vlan_groups_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_ipam_vlans_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_ipam_vrfs_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_tenancy_contact_assignments_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_tenancy_contact_groups_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_tenancy_contact_roles_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_tenancy_contacts_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_tenancy_tenant_groups_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_tenancy_tenants_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_users_groups_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_users_permissions_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_users_tokens_list/data-source.tf create mode 100644 
examples/data-sources/netbox_json_users_users_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_virtualization_cluster_groups_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_virtualization_cluster_types_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_virtualization_clusters_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_virtualization_interfaces_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_virtualization_virtual_machines_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_wireless_wireless_lan_groups_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_wireless_wireless_lans_list/data-source.tf create mode 100644 examples/data-sources/netbox_json_wireless_wireless_links_list/data-source.tf create mode 100644 examples/provider/provider.tf create mode 100644 examples/resources/netbox_ipam_aggregate/resource.tf create mode 100644 examples/resources/netbox_ipam_ip_addresses/resource.tf create mode 100644 examples/resources/netbox_ipam_prefix/resource.tf create mode 100644 examples/resources/netbox_ipam_service/resource.tf create mode 100644 examples/resources/netbox_ipam_vlan/resource.tf create mode 100644 examples/resources/netbox_ipam_vlan_group/resource.tf create mode 100644 examples/resources/netbox_tenancy_contact/resource.tf create mode 100644 examples/resources/netbox_tenancy_contact_assignment/resource.tf create mode 100644 examples/resources/netbox_tenancy_contact_group/resource.tf create mode 100644 examples/resources/netbox_tenancy_contact_role/resource.tf create mode 100644 examples/resources/netbox_tenancy_tenant/resource.tf create mode 100644 examples/resources/netbox_tenancy_tenant_group/resource.tf create mode 100644 examples/resources/netbox_virtualization_interface/resource.tf create mode 100644 examples/resources/netbox_virtualization_vm/resource.tf create mode 100644 
templates/index.md.tmpl create mode 100644 tools/tools.go create mode 100644 vendor/github.com/Masterminds/goutils/.travis.yml create mode 100644 vendor/github.com/Masterminds/goutils/CHANGELOG.md create mode 100644 vendor/github.com/Masterminds/goutils/LICENSE.txt create mode 100644 vendor/github.com/Masterminds/goutils/README.md create mode 100644 vendor/github.com/Masterminds/goutils/appveyor.yml create mode 100644 vendor/github.com/Masterminds/goutils/cryptorandomstringutils.go create mode 100644 vendor/github.com/Masterminds/goutils/randomstringutils.go create mode 100644 vendor/github.com/Masterminds/goutils/stringutils.go create mode 100644 vendor/github.com/Masterminds/goutils/wordutils.go create mode 100644 vendor/github.com/Masterminds/semver/v3/.gitignore create mode 100644 vendor/github.com/Masterminds/semver/v3/.golangci.yml create mode 100644 vendor/github.com/Masterminds/semver/v3/CHANGELOG.md create mode 100644 vendor/github.com/Masterminds/semver/v3/LICENSE.txt create mode 100644 vendor/github.com/Masterminds/semver/v3/Makefile create mode 100644 vendor/github.com/Masterminds/semver/v3/README.md create mode 100644 vendor/github.com/Masterminds/semver/v3/collection.go create mode 100644 vendor/github.com/Masterminds/semver/v3/constraints.go create mode 100644 vendor/github.com/Masterminds/semver/v3/doc.go create mode 100644 vendor/github.com/Masterminds/semver/v3/fuzz.go create mode 100644 vendor/github.com/Masterminds/semver/v3/version.go create mode 100644 vendor/github.com/Masterminds/sprig/v3/.gitignore create mode 100644 vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md create mode 100644 vendor/github.com/Masterminds/sprig/v3/LICENSE.txt create mode 100644 vendor/github.com/Masterminds/sprig/v3/Makefile create mode 100644 vendor/github.com/Masterminds/sprig/v3/README.md create mode 100644 vendor/github.com/Masterminds/sprig/v3/crypto.go create mode 100644 vendor/github.com/Masterminds/sprig/v3/date.go create mode 100644 
vendor/github.com/Masterminds/sprig/v3/defaults.go create mode 100644 vendor/github.com/Masterminds/sprig/v3/dict.go create mode 100644 vendor/github.com/Masterminds/sprig/v3/doc.go create mode 100644 vendor/github.com/Masterminds/sprig/v3/functions.go create mode 100644 vendor/github.com/Masterminds/sprig/v3/list.go create mode 100644 vendor/github.com/Masterminds/sprig/v3/network.go create mode 100644 vendor/github.com/Masterminds/sprig/v3/numeric.go create mode 100644 vendor/github.com/Masterminds/sprig/v3/reflect.go create mode 100644 vendor/github.com/Masterminds/sprig/v3/regex.go create mode 100644 vendor/github.com/Masterminds/sprig/v3/semver.go create mode 100644 vendor/github.com/Masterminds/sprig/v3/strings.go create mode 100644 vendor/github.com/Masterminds/sprig/v3/url.go create mode 100644 vendor/github.com/armon/go-radix/.gitignore create mode 100644 vendor/github.com/armon/go-radix/.travis.yml create mode 100644 vendor/github.com/armon/go-radix/LICENSE create mode 100644 vendor/github.com/armon/go-radix/README.md create mode 100644 vendor/github.com/armon/go-radix/radix.go create mode 100644 vendor/github.com/bgentry/speakeasy/.gitignore create mode 100644 vendor/github.com/bgentry/speakeasy/LICENSE create mode 100644 vendor/github.com/bgentry/speakeasy/LICENSE_WINDOWS create mode 100644 vendor/github.com/bgentry/speakeasy/Readme.md create mode 100644 vendor/github.com/bgentry/speakeasy/speakeasy.go create mode 100644 vendor/github.com/bgentry/speakeasy/speakeasy_unix.go create mode 100644 vendor/github.com/bgentry/speakeasy/speakeasy_windows.go create mode 100644 vendor/github.com/google/uuid/.travis.yml create mode 100644 vendor/github.com/google/uuid/CONTRIBUTING.md create mode 100644 vendor/github.com/google/uuid/CONTRIBUTORS create mode 100644 vendor/github.com/google/uuid/LICENSE create mode 100644 vendor/github.com/google/uuid/README.md create mode 100644 vendor/github.com/google/uuid/dce.go create mode 100644 
vendor/github.com/google/uuid/doc.go create mode 100644 vendor/github.com/google/uuid/hash.go create mode 100644 vendor/github.com/google/uuid/marshal.go create mode 100644 vendor/github.com/google/uuid/node.go create mode 100644 vendor/github.com/google/uuid/node_js.go create mode 100644 vendor/github.com/google/uuid/node_net.go create mode 100644 vendor/github.com/google/uuid/null.go create mode 100644 vendor/github.com/google/uuid/sql.go create mode 100644 vendor/github.com/google/uuid/time.go create mode 100644 vendor/github.com/google/uuid/util.go create mode 100644 vendor/github.com/google/uuid/uuid.go create mode 100644 vendor/github.com/google/uuid/version1.go create mode 100644 vendor/github.com/google/uuid/version4.go create mode 100644 vendor/github.com/hashicorp/go-checkpoint/LICENSE create mode 100644 vendor/github.com/hashicorp/go-checkpoint/README.md create mode 100644 vendor/github.com/hashicorp/go-checkpoint/check.go create mode 100644 vendor/github.com/hashicorp/go-checkpoint/telemetry.go create mode 100644 vendor/github.com/hashicorp/go-checkpoint/versions.go create mode 100644 vendor/github.com/hashicorp/go-cleanhttp/LICENSE create mode 100644 vendor/github.com/hashicorp/go-cleanhttp/README.md create mode 100644 vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go create mode 100644 vendor/github.com/hashicorp/go-cleanhttp/doc.go create mode 100644 vendor/github.com/hashicorp/go-cleanhttp/handlers.go create mode 100644 vendor/github.com/hashicorp/hc-install/.go-version create mode 100644 vendor/github.com/hashicorp/hc-install/.goreleaser.yml create mode 100644 vendor/github.com/hashicorp/hc-install/LICENSE create mode 100644 vendor/github.com/hashicorp/hc-install/README.md create mode 100644 vendor/github.com/hashicorp/hc-install/checkpoint/latest_version.go create mode 100644 vendor/github.com/hashicorp/hc-install/errors/errors.go create mode 100644 vendor/github.com/hashicorp/hc-install/fs/any_version.go create mode 100644 
vendor/github.com/hashicorp/hc-install/fs/exact_version.go create mode 100644 vendor/github.com/hashicorp/hc-install/fs/fs.go create mode 100644 vendor/github.com/hashicorp/hc-install/fs/fs_unix.go create mode 100644 vendor/github.com/hashicorp/hc-install/fs/fs_windows.go create mode 100644 vendor/github.com/hashicorp/hc-install/installer.go create mode 100644 vendor/github.com/hashicorp/hc-install/internal/build/get_go_version.go create mode 100644 vendor/github.com/hashicorp/hc-install/internal/build/go_build.go create mode 100644 vendor/github.com/hashicorp/hc-install/internal/build/go_is_installed.go create mode 100644 vendor/github.com/hashicorp/hc-install/internal/build/install_go_version.go create mode 100644 vendor/github.com/hashicorp/hc-install/internal/httpclient/httpclient.go create mode 100644 vendor/github.com/hashicorp/hc-install/internal/pubkey/pubkey.go create mode 100644 vendor/github.com/hashicorp/hc-install/internal/releasesjson/checksum_downloader.go create mode 100644 vendor/github.com/hashicorp/hc-install/internal/releasesjson/downloader.go create mode 100644 vendor/github.com/hashicorp/hc-install/internal/releasesjson/product_version.go create mode 100644 vendor/github.com/hashicorp/hc-install/internal/releasesjson/releases.go create mode 100644 vendor/github.com/hashicorp/hc-install/internal/src/src.go create mode 100644 vendor/github.com/hashicorp/hc-install/internal/validators/validators.go create mode 100644 vendor/github.com/hashicorp/hc-install/internal/version/version.go create mode 100644 vendor/github.com/hashicorp/hc-install/product/consul.go create mode 100644 vendor/github.com/hashicorp/hc-install/product/product.go create mode 100644 vendor/github.com/hashicorp/hc-install/product/terraform.go create mode 100644 vendor/github.com/hashicorp/hc-install/releases/exact_version.go create mode 100644 vendor/github.com/hashicorp/hc-install/releases/latest_version.go create mode 100644 
vendor/github.com/hashicorp/hc-install/releases/releases.go create mode 100644 vendor/github.com/hashicorp/hc-install/releases/versions.go create mode 100644 vendor/github.com/hashicorp/hc-install/src/src.go create mode 100644 vendor/github.com/hashicorp/terraform-exec/LICENSE create mode 100644 vendor/github.com/hashicorp/terraform-exec/internal/version/version.go create mode 100644 vendor/github.com/hashicorp/terraform-exec/tfexec/apply.go create mode 100644 vendor/github.com/hashicorp/terraform-exec/tfexec/cmd.go create mode 100644 vendor/github.com/hashicorp/terraform-exec/tfexec/cmd_default.go create mode 100644 vendor/github.com/hashicorp/terraform-exec/tfexec/cmd_linux.go create mode 100644 vendor/github.com/hashicorp/terraform-exec/tfexec/destroy.go create mode 100644 vendor/github.com/hashicorp/terraform-exec/tfexec/doc.go create mode 100644 vendor/github.com/hashicorp/terraform-exec/tfexec/errors.go create mode 100644 vendor/github.com/hashicorp/terraform-exec/tfexec/exit_errors.go create mode 100644 vendor/github.com/hashicorp/terraform-exec/tfexec/fmt.go create mode 100644 vendor/github.com/hashicorp/terraform-exec/tfexec/force_unlock.go create mode 100644 vendor/github.com/hashicorp/terraform-exec/tfexec/get.go create mode 100644 vendor/github.com/hashicorp/terraform-exec/tfexec/graph.go create mode 100644 vendor/github.com/hashicorp/terraform-exec/tfexec/import.go create mode 100644 vendor/github.com/hashicorp/terraform-exec/tfexec/init.go create mode 100644 vendor/github.com/hashicorp/terraform-exec/tfexec/options.go create mode 100644 vendor/github.com/hashicorp/terraform-exec/tfexec/output.go create mode 100644 vendor/github.com/hashicorp/terraform-exec/tfexec/plan.go create mode 100644 vendor/github.com/hashicorp/terraform-exec/tfexec/providers_lock.go create mode 100644 vendor/github.com/hashicorp/terraform-exec/tfexec/providers_schema.go create mode 100644 vendor/github.com/hashicorp/terraform-exec/tfexec/refresh.go create mode 100644 
vendor/github.com/hashicorp/terraform-exec/tfexec/show.go create mode 100644 vendor/github.com/hashicorp/terraform-exec/tfexec/state_mv.go create mode 100644 vendor/github.com/hashicorp/terraform-exec/tfexec/state_rm.go create mode 100644 vendor/github.com/hashicorp/terraform-exec/tfexec/taint.go create mode 100644 vendor/github.com/hashicorp/terraform-exec/tfexec/terraform.go create mode 100644 vendor/github.com/hashicorp/terraform-exec/tfexec/untaint.go create mode 100644 vendor/github.com/hashicorp/terraform-exec/tfexec/upgrade012.go create mode 100644 vendor/github.com/hashicorp/terraform-exec/tfexec/upgrade013.go create mode 100644 vendor/github.com/hashicorp/terraform-exec/tfexec/validate.go create mode 100644 vendor/github.com/hashicorp/terraform-exec/tfexec/version.go create mode 100644 vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_delete.go create mode 100644 vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_list.go create mode 100644 vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_new.go create mode 100644 vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_select.go create mode 100644 vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_show.go create mode 100644 vendor/github.com/hashicorp/terraform-json/.gitignore create mode 100644 vendor/github.com/hashicorp/terraform-json/.go-version create mode 100644 vendor/github.com/hashicorp/terraform-json/LICENSE create mode 100644 vendor/github.com/hashicorp/terraform-json/Makefile create mode 100644 vendor/github.com/hashicorp/terraform-json/README.md create mode 100644 vendor/github.com/hashicorp/terraform-json/action.go create mode 100644 vendor/github.com/hashicorp/terraform-json/config.go create mode 100644 vendor/github.com/hashicorp/terraform-json/expression.go create mode 100644 vendor/github.com/hashicorp/terraform-json/plan.go create mode 100644 vendor/github.com/hashicorp/terraform-json/schemas.go create mode 100644 
vendor/github.com/hashicorp/terraform-json/state.go create mode 100644 vendor/github.com/hashicorp/terraform-json/tfjson.go create mode 100644 vendor/github.com/hashicorp/terraform-json/validate.go create mode 100644 vendor/github.com/hashicorp/terraform-json/version.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-docs/LICENSE create mode 100644 vendor/github.com/hashicorp/terraform-plugin-docs/cmd/tfplugindocs/main.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-docs/cmd/tfplugindocs/version.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-docs/internal/cmd/generate.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-docs/internal/cmd/run.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-docs/internal/cmd/serve.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-docs/internal/cmd/validate.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-docs/internal/mdplain/mdplain.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-docs/internal/mdplain/renderer.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-docs/internal/provider/generate.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-docs/internal/provider/template.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-docs/internal/provider/util.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-docs/internal/provider/validate.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-docs/internal/tmplfuncs/tmplfuncs.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-docs/schemamd/behaviors.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-docs/schemamd/render.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-docs/schemamd/write_attribute_description.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-docs/schemamd/write_block_type_description.go create 
mode 100644 vendor/github.com/hashicorp/terraform-plugin-docs/schemamd/write_nested_attribute_type_description.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-docs/schemamd/write_type.go create mode 100644 vendor/github.com/huandu/xstrings/.gitignore create mode 100644 vendor/github.com/huandu/xstrings/.travis.yml create mode 100644 vendor/github.com/huandu/xstrings/CONTRIBUTING.md create mode 100644 vendor/github.com/huandu/xstrings/LICENSE create mode 100644 vendor/github.com/huandu/xstrings/README.md create mode 100644 vendor/github.com/huandu/xstrings/common.go create mode 100644 vendor/github.com/huandu/xstrings/convert.go create mode 100644 vendor/github.com/huandu/xstrings/count.go create mode 100644 vendor/github.com/huandu/xstrings/doc.go create mode 100644 vendor/github.com/huandu/xstrings/format.go create mode 100644 vendor/github.com/huandu/xstrings/manipulate.go create mode 100644 vendor/github.com/huandu/xstrings/stringbuilder.go create mode 100644 vendor/github.com/huandu/xstrings/stringbuilder_go110.go create mode 100644 vendor/github.com/huandu/xstrings/translate.go create mode 100644 vendor/github.com/imdario/mergo/.deepsource.toml create mode 100644 vendor/github.com/imdario/mergo/.gitignore create mode 100644 vendor/github.com/imdario/mergo/.travis.yml create mode 100644 vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md create mode 100644 vendor/github.com/imdario/mergo/LICENSE create mode 100644 vendor/github.com/imdario/mergo/README.md create mode 100644 vendor/github.com/imdario/mergo/doc.go create mode 100644 vendor/github.com/imdario/mergo/map.go create mode 100644 vendor/github.com/imdario/mergo/merge.go create mode 100644 vendor/github.com/imdario/mergo/mergo.go create mode 100644 vendor/github.com/mitchellh/cli/LICENSE create mode 100644 vendor/github.com/mitchellh/cli/Makefile create mode 100644 vendor/github.com/mitchellh/cli/README.md create mode 100644 vendor/github.com/mitchellh/cli/autocomplete.go create mode 
100644 vendor/github.com/mitchellh/cli/cli.go create mode 100644 vendor/github.com/mitchellh/cli/command.go create mode 100644 vendor/github.com/mitchellh/cli/command_mock.go create mode 100644 vendor/github.com/mitchellh/cli/help.go create mode 100644 vendor/github.com/mitchellh/cli/ui.go create mode 100644 vendor/github.com/mitchellh/cli/ui_colored.go create mode 100644 vendor/github.com/mitchellh/cli/ui_concurrent.go create mode 100644 vendor/github.com/mitchellh/cli/ui_mock.go create mode 100644 vendor/github.com/mitchellh/cli/ui_writer.go create mode 100644 vendor/github.com/posener/complete/.gitignore create mode 100644 vendor/github.com/posener/complete/.travis.yml create mode 100644 vendor/github.com/posener/complete/LICENSE.txt create mode 100644 vendor/github.com/posener/complete/README.md create mode 100644 vendor/github.com/posener/complete/args.go create mode 100644 vendor/github.com/posener/complete/cmd/cmd.go create mode 100644 vendor/github.com/posener/complete/cmd/install/bash.go create mode 100644 vendor/github.com/posener/complete/cmd/install/fish.go create mode 100644 vendor/github.com/posener/complete/cmd/install/install.go create mode 100644 vendor/github.com/posener/complete/cmd/install/utils.go create mode 100644 vendor/github.com/posener/complete/cmd/install/zsh.go create mode 100644 vendor/github.com/posener/complete/command.go create mode 100644 vendor/github.com/posener/complete/complete.go create mode 100644 vendor/github.com/posener/complete/doc.go create mode 100644 vendor/github.com/posener/complete/goreadme.json create mode 100644 vendor/github.com/posener/complete/log.go create mode 100644 vendor/github.com/posener/complete/predict.go create mode 100644 vendor/github.com/posener/complete/predict_files.go create mode 100644 vendor/github.com/posener/complete/predict_set.go create mode 100644 vendor/github.com/russross/blackfriday/.gitignore create mode 100644 vendor/github.com/russross/blackfriday/.travis.yml create mode 100644 
vendor/github.com/russross/blackfriday/LICENSE.txt create mode 100644 vendor/github.com/russross/blackfriday/README.md create mode 100644 vendor/github.com/russross/blackfriday/block.go create mode 100644 vendor/github.com/russross/blackfriday/doc.go create mode 100644 vendor/github.com/russross/blackfriday/html.go create mode 100644 vendor/github.com/russross/blackfriday/inline.go create mode 100644 vendor/github.com/russross/blackfriday/latex.go create mode 100644 vendor/github.com/russross/blackfriday/markdown.go create mode 100644 vendor/github.com/russross/blackfriday/smartypants.go create mode 100644 vendor/github.com/shopspring/decimal/.gitignore create mode 100644 vendor/github.com/shopspring/decimal/.travis.yml create mode 100644 vendor/github.com/shopspring/decimal/CHANGELOG.md create mode 100644 vendor/github.com/shopspring/decimal/LICENSE create mode 100644 vendor/github.com/shopspring/decimal/README.md create mode 100644 vendor/github.com/shopspring/decimal/decimal-go.go create mode 100644 vendor/github.com/shopspring/decimal/decimal.go create mode 100644 vendor/github.com/shopspring/decimal/rounding.go create mode 100644 vendor/github.com/spf13/cast/.gitignore create mode 100644 vendor/github.com/spf13/cast/LICENSE create mode 100644 vendor/github.com/spf13/cast/Makefile create mode 100644 vendor/github.com/spf13/cast/README.md create mode 100644 vendor/github.com/spf13/cast/cast.go create mode 100644 vendor/github.com/spf13/cast/caste.go create mode 100644 vendor/github.com/spf13/cast/timeformattype_string.go create mode 100644 vendor/golang.org/x/crypto/AUTHORS create mode 100644 vendor/golang.org/x/crypto/CONTRIBUTORS create mode 100644 vendor/golang.org/x/crypto/LICENSE create mode 100644 vendor/golang.org/x/crypto/PATENTS create mode 100644 vendor/golang.org/x/crypto/bcrypt/base64.go create mode 100644 vendor/golang.org/x/crypto/bcrypt/bcrypt.go create mode 100644 vendor/golang.org/x/crypto/blowfish/block.go create mode 100644 
vendor/golang.org/x/crypto/blowfish/cipher.go create mode 100644 vendor/golang.org/x/crypto/blowfish/const.go create mode 100644 vendor/golang.org/x/crypto/cast5/cast5.go create mode 100644 vendor/golang.org/x/crypto/openpgp/armor/armor.go create mode 100644 vendor/golang.org/x/crypto/openpgp/armor/encode.go create mode 100644 vendor/golang.org/x/crypto/openpgp/canonical_text.go create mode 100644 vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go create mode 100644 vendor/golang.org/x/crypto/openpgp/errors/errors.go create mode 100644 vendor/golang.org/x/crypto/openpgp/keys.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/compressed.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/config.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/literal.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/ocfb.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/opaque.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/packet.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/private_key.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/public_key.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/reader.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/signature.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/userattribute.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/userid.go create mode 100644 
vendor/golang.org/x/crypto/openpgp/read.go create mode 100644 vendor/golang.org/x/crypto/openpgp/s2k/s2k.go create mode 100644 vendor/golang.org/x/crypto/openpgp/write.go create mode 100644 vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go create mode 100644 vendor/golang.org/x/crypto/scrypt/scrypt.go diff --git a/.github/workflows/master.yml b/.github/workflows/master.yml index 9da5bbd66..796fd876c 100644 --- a/.github/workflows/master.yml +++ b/.github/workflows/master.yml @@ -44,13 +44,19 @@ jobs: go fmt netbox/* shell: bash + - name: Go generate + run: | + cd "$(dirname $GITHUB_WORKSPACE)/src/github.com/smutel/terraform-provider-netbox" + go generate + shell: bash + - name: Commit changes uses: EndBug/add-and-commit@v9.0.0 with: - add: 'netbox' + add: 'netbox docs' author_name: smutel default_author: github_actor - message: 'ci: Go fmt' + message: 'ci: Go fmt & go generate' - name: Build run: | diff --git a/docs/data-sources/dcim_platform.md b/docs/data-sources/dcim_platform.md deleted file mode 100644 index c76c9462a..000000000 --- a/docs/data-sources/dcim_platform.md +++ /dev/null @@ -1,22 +0,0 @@ -# netbox\_dcim\_platform Data Source - -Get info about dcim platform from netbox. - -## Example Usage - -```hcl -data "netbox_dcim_platform" "platform_test" { - slug = "TestPlatform" -} -``` - -## Argument Reference - -The following arguments are supported: -* ``slug`` - (Required) The slug of the dcim platform. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``id`` - The id (ref in Netbox) of this object. -* ``content_type`` - The content type of this object. diff --git a/docs/data-sources/dcim_site.md b/docs/data-sources/dcim_site.md deleted file mode 100644 index 63341f557..000000000 --- a/docs/data-sources/dcim_site.md +++ /dev/null @@ -1,22 +0,0 @@ -# netbox\_dcim\_site Data Source - -Get info about dcim site from netbox. 
- -## Example Usage - -```hcl -data "netbox_dcim_site" "site_test" { - slug = "TestSite" -} -``` - -## Argument Reference - -The following arguments are supported: -* ``slug`` - (Required) The slug of the dcim site. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``id`` - The id (ref in Netbox) of this object. -* ``content_type`` - The content type of this object. diff --git a/docs/data-sources/ipam_aggregate.md b/docs/data-sources/ipam_aggregate.md deleted file mode 100644 index 8ab72c0b3..000000000 --- a/docs/data-sources/ipam_aggregate.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_ipam\_aggregate Data Source - -Get info about aggregate from Netbox. - -## Example Usage - -```hcl -data "netbox_ipam_aggregate" "aggregate_test" { - prefix = "192.168.56.0/24" - rir_id = 1 -} -``` - -## Argument Reference - -The following arguments are supported: -* ``prefix`` - (Required) The prefix (with mask) used for this object. -* ``rir_id`` - (Required) The RIR id linked to this object. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``id`` - The id (ref in Netbox) of this object. -* ``content_type`` - The content type of this object. diff --git a/docs/data-sources/ipam_ip_addresses.md b/docs/data-sources/ipam_ip_addresses.md deleted file mode 100644 index a079248f2..000000000 --- a/docs/data-sources/ipam_ip_addresses.md +++ /dev/null @@ -1,22 +0,0 @@ -# netbox\_ipam\_ip\_addresses Data Source - -Get info about ipam IP addresses from netbox. - -## Example Usage - -```hcl -data "netbox_ipam_ip_addresses" "ipaddress_test" { - address = "192.168.56.1/24" -} -``` - -## Argument Reference - -The following arguments are supported: -* ``address`` - (Required) The address (with mask) of the ipam IP address. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``id`` - The id (ref in Netbox) of this object. 
-* ``content_type`` - The content type of this object. diff --git a/docs/data-sources/ipam_role.md b/docs/data-sources/ipam_role.md deleted file mode 100644 index 3d83fda85..000000000 --- a/docs/data-sources/ipam_role.md +++ /dev/null @@ -1,22 +0,0 @@ -# netbox\_ipam\_role Data Source - -Get info about ipam role from netbox. - -## Example Usage - -```hcl -data "netbox_ipam_role" "role_test" { - slug = "TestRole" -} -``` - -## Argument Reference - -The following arguments are supported: -* ``slug`` - (Required) The slug of the ipam role. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``id`` - The id (ref in Netbox) of this object. -* ``content_type`` - The content type of this object. diff --git a/docs/data-sources/ipam_service.md b/docs/data-sources/ipam_service.md deleted file mode 100644 index 71d0688f9..000000000 --- a/docs/data-sources/ipam_service.md +++ /dev/null @@ -1,29 +0,0 @@ -# netbox\_ipam\_service Data Source - -Get info about ipam service from netbox. - -## Example Usage - -```hcl -data "netbox_ipam_service" "service_test" { - device_id = 5 - name = "Mail" - port = 25 - protocol = "tcp" -} -``` - -## Argument Reference - -The following arguments are supported: -* ``device_id`` - (Optional) The ID of the device linked to this object. -* ``name`` - (Required) The name of this object. -* ``port`` - (Required) The port of this object. -* ``protocol`` - (Required) The protocol of this service (tcp or udp). -* ``virtualmachine_id`` - (Optional) The ID of the vm linked to this object. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``id`` - The id (ref in Netbox) of this object. -* ``content_type`` - The content type of this object. 
diff --git a/docs/data-sources/ipam_vlan.md b/docs/data-sources/ipam_vlan.md deleted file mode 100644 index 9d0a9e54e..000000000 --- a/docs/data-sources/ipam_vlan.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_ipam\_vlan Data Source - -Get info about ipam vlan from netbox. - -## Example Usage - -```hcl -data "netbox_ipam_vlan" "vlan_test" { - vlan_id = 15 - vlan_group_id = 16 -} -``` - -## Argument Reference - -The following arguments are supported: -* ``vlan_id`` - (Required) The ID of the ipam vlan. -* ``vlan_group_id`` - (Optional) The id of the vlan group linked to this vlan. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``id`` - The id (ref in Netbox) of this object. -* ``content_type`` - The content type of this object. diff --git a/docs/data-sources/ipam_vlan_group.md b/docs/data-sources/ipam_vlan_group.md deleted file mode 100644 index a3509f85b..000000000 --- a/docs/data-sources/ipam_vlan_group.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_ipam\_vlan\_group Data Source - -Get info about ipam vlan group from netbox. - -## Example Usage - -```hcl -data "netbox_ipam_vlan_group" "vlan_group_test" { - slug = "TestVlanGroup" - site_id = 15 -} -``` - -## Argument Reference - -The following arguments are supported: -* ``slug`` - (Required) The slug of the ipam vlan group. -* ``site_id`` - (Optional) The site_id of the ipam vlan groups. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``id`` - The id (ref in Netbox) of this object. -* ``content_type`` - The content type of this object. 
diff --git a/docs/data-sources/json_circuits_circuit_terminations_list.md b/docs/data-sources/json_circuits_circuit_terminations_list.md deleted file mode 100644 index f886eeaf7..000000000 --- a/docs/data-sources/json_circuits_circuit_terminations_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_circuits\_circuit\_terminations\_list Data Source - -Get json output from the circuits_circuit_terminations_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_circuits_circuit_terminations_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_circuits_circuit_terminations_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. - diff --git a/docs/data-sources/json_circuits_circuit_types_list.md b/docs/data-sources/json_circuits_circuit_types_list.md deleted file mode 100644 index 73e6eb0d4..000000000 --- a/docs/data-sources/json_circuits_circuit_types_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_circuits\_circuit\_types\_list Data Source - -Get json output from the circuits_circuit_types_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_circuits_circuit_types_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_circuits_circuit_types_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. 
- diff --git a/docs/data-sources/json_circuits_circuits_list.md b/docs/data-sources/json_circuits_circuits_list.md deleted file mode 100644 index 6617ce1f6..000000000 --- a/docs/data-sources/json_circuits_circuits_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_circuits\_circuits\_list Data Source - -Get json output from the circuits_circuits_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_circuits_circuits_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_circuits_circuits_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. - diff --git a/docs/data-sources/json_circuits_provider_networks_list.md b/docs/data-sources/json_circuits_provider_networks_list.md deleted file mode 100644 index 446c328ce..000000000 --- a/docs/data-sources/json_circuits_provider_networks_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_circuits\_provider\_networks\_list Data Source - -Get json output from the circuits_provider_networks_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_circuits_provider_networks_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_circuits_provider_networks_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. 
- diff --git a/docs/data-sources/json_circuits_providers_list.md b/docs/data-sources/json_circuits_providers_list.md deleted file mode 100644 index bfe751fe0..000000000 --- a/docs/data-sources/json_circuits_providers_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_circuits\_providers\_list Data Source - -Get json output from the circuits_providers_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_circuits_providers_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_circuits_providers_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. - diff --git a/docs/data-sources/json_dcim_cables_list.md b/docs/data-sources/json_dcim_cables_list.md deleted file mode 100644 index 2f4903e57..000000000 --- a/docs/data-sources/json_dcim_cables_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_dcim\_cables\_list Data Source - -Get json output from the dcim_cables_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_dcim_cables_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_dcim_cables_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. 
- diff --git a/docs/data-sources/json_dcim_console_port_templates_list.md b/docs/data-sources/json_dcim_console_port_templates_list.md deleted file mode 100644 index 80a38118e..000000000 --- a/docs/data-sources/json_dcim_console_port_templates_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_dcim\_console\_port\_templates\_list Data Source - -Get json output from the dcim_console_port_templates_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_dcim_console_port_templates_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_dcim_console_port_templates_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. - diff --git a/docs/data-sources/json_dcim_console_ports_list.md b/docs/data-sources/json_dcim_console_ports_list.md deleted file mode 100644 index 251aabae8..000000000 --- a/docs/data-sources/json_dcim_console_ports_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_dcim\_console\_ports\_list Data Source - -Get json output from the dcim_console_ports_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_dcim_console_ports_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_dcim_console_ports_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. 
- diff --git a/docs/data-sources/json_dcim_console_server_port_templates_list.md b/docs/data-sources/json_dcim_console_server_port_templates_list.md deleted file mode 100644 index 880c2e719..000000000 --- a/docs/data-sources/json_dcim_console_server_port_templates_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_dcim\_console\_server\_port\_templates\_list Data Source - -Get json output from the dcim_console_server_port_templates_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_dcim_console_server_port_templates_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_dcim_console_server_port_templates_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. - diff --git a/docs/data-sources/json_dcim_console_server_ports_list.md b/docs/data-sources/json_dcim_console_server_ports_list.md deleted file mode 100644 index 1d5352706..000000000 --- a/docs/data-sources/json_dcim_console_server_ports_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_dcim\_console\_server\_ports\_list Data Source - -Get json output from the dcim_console_server_ports_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_dcim_console_server_ports_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_dcim_console_server_ports_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. 
- diff --git a/docs/data-sources/json_dcim_device_bay_templates_list.md b/docs/data-sources/json_dcim_device_bay_templates_list.md deleted file mode 100644 index e48c43140..000000000 --- a/docs/data-sources/json_dcim_device_bay_templates_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_dcim\_device\_bay\_templates\_list Data Source - -Get json output from the dcim_device_bay_templates_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_dcim_device_bay_templates_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_dcim_device_bay_templates_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. - diff --git a/docs/data-sources/json_dcim_device_bays_list.md b/docs/data-sources/json_dcim_device_bays_list.md deleted file mode 100644 index f39e7d33d..000000000 --- a/docs/data-sources/json_dcim_device_bays_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_dcim\_device\_bays\_list Data Source - -Get json output from the dcim_device_bays_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_dcim_device_bays_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_dcim_device_bays_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. 
- diff --git a/docs/data-sources/json_dcim_device_roles_list.md b/docs/data-sources/json_dcim_device_roles_list.md deleted file mode 100644 index 2a713e594..000000000 --- a/docs/data-sources/json_dcim_device_roles_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_dcim\_device\_roles\_list Data Source - -Get json output from the dcim_device_roles_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_dcim_device_roles_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_dcim_device_roles_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. - diff --git a/docs/data-sources/json_dcim_device_types_list.md b/docs/data-sources/json_dcim_device_types_list.md deleted file mode 100644 index 14ef26202..000000000 --- a/docs/data-sources/json_dcim_device_types_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_dcim\_device\_types\_list Data Source - -Get json output from the dcim_device_types_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_dcim_device_types_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_dcim_device_types_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. 
- diff --git a/docs/data-sources/json_dcim_devices_list.md b/docs/data-sources/json_dcim_devices_list.md deleted file mode 100644 index c83a0bc4e..000000000 --- a/docs/data-sources/json_dcim_devices_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_dcim\_devices\_list Data Source - -Get json output from the dcim_devices_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_dcim_devices_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_dcim_devices_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. - diff --git a/docs/data-sources/json_dcim_front_port_templates_list.md b/docs/data-sources/json_dcim_front_port_templates_list.md deleted file mode 100644 index 1f3e7b2e9..000000000 --- a/docs/data-sources/json_dcim_front_port_templates_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_dcim\_front\_port\_templates\_list Data Source - -Get json output from the dcim_front_port_templates_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_dcim_front_port_templates_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_dcim_front_port_templates_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. 
- diff --git a/docs/data-sources/json_dcim_front_ports_list.md b/docs/data-sources/json_dcim_front_ports_list.md deleted file mode 100644 index 26a9e6979..000000000 --- a/docs/data-sources/json_dcim_front_ports_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_dcim\_front\_ports\_list Data Source - -Get json output from the dcim_front_ports_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_dcim_front_ports_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_dcim_front_ports_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. - diff --git a/docs/data-sources/json_dcim_interface_templates_list.md b/docs/data-sources/json_dcim_interface_templates_list.md deleted file mode 100644 index 348343f2b..000000000 --- a/docs/data-sources/json_dcim_interface_templates_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_dcim\_interface\_templates\_list Data Source - -Get json output from the dcim_interface_templates_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_dcim_interface_templates_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_dcim_interface_templates_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. 
- diff --git a/docs/data-sources/json_dcim_interfaces_list.md b/docs/data-sources/json_dcim_interfaces_list.md deleted file mode 100644 index 132a06151..000000000 --- a/docs/data-sources/json_dcim_interfaces_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_dcim\_interfaces\_list Data Source - -Get json output from the dcim_interfaces_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_dcim_interfaces_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_dcim_interfaces_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. - diff --git a/docs/data-sources/json_dcim_inventory_items_list.md b/docs/data-sources/json_dcim_inventory_items_list.md deleted file mode 100644 index 4a891fb71..000000000 --- a/docs/data-sources/json_dcim_inventory_items_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_dcim\_inventory\_items\_list Data Source - -Get json output from the dcim_inventory_items_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_dcim_inventory_items_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_dcim_inventory_items_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. 
- diff --git a/docs/data-sources/json_dcim_locations_list.md b/docs/data-sources/json_dcim_locations_list.md deleted file mode 100644 index 7333dba59..000000000 --- a/docs/data-sources/json_dcim_locations_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_dcim\_locations\_list Data Source - -Get json output from the dcim_locations_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_dcim_locations_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_dcim_locations_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. - diff --git a/docs/data-sources/json_dcim_manufacturers_list.md b/docs/data-sources/json_dcim_manufacturers_list.md deleted file mode 100644 index 520767bc1..000000000 --- a/docs/data-sources/json_dcim_manufacturers_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_dcim\_manufacturers\_list Data Source - -Get json output from the dcim_manufacturers_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_dcim_manufacturers_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_dcim_manufacturers_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. 
- diff --git a/docs/data-sources/json_dcim_platforms_list.md b/docs/data-sources/json_dcim_platforms_list.md deleted file mode 100644 index e5bbecdcf..000000000 --- a/docs/data-sources/json_dcim_platforms_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_dcim\_platforms\_list Data Source - -Get json output from the dcim_platforms_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_dcim_platforms_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_dcim_platforms_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. - diff --git a/docs/data-sources/json_dcim_power_feeds_list.md b/docs/data-sources/json_dcim_power_feeds_list.md deleted file mode 100644 index f989586b3..000000000 --- a/docs/data-sources/json_dcim_power_feeds_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_dcim\_power\_feeds\_list Data Source - -Get json output from the dcim_power_feeds_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_dcim_power_feeds_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_dcim_power_feeds_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. 
- diff --git a/docs/data-sources/json_dcim_power_outlet_templates_list.md b/docs/data-sources/json_dcim_power_outlet_templates_list.md deleted file mode 100644 index 0c7244f76..000000000 --- a/docs/data-sources/json_dcim_power_outlet_templates_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_dcim\_power\_outlet\_templates\_list Data Source - -Get json output from the dcim_power_outlet_templates_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_dcim_power_outlet_templates_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_dcim_power_outlet_templates_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. - diff --git a/docs/data-sources/json_dcim_power_outlets_list.md b/docs/data-sources/json_dcim_power_outlets_list.md deleted file mode 100644 index 290d99ad0..000000000 --- a/docs/data-sources/json_dcim_power_outlets_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_dcim\_power\_outlets\_list Data Source - -Get json output from the dcim_power_outlets_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_dcim_power_outlets_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_dcim_power_outlets_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. 
- diff --git a/docs/data-sources/json_dcim_power_panels_list.md b/docs/data-sources/json_dcim_power_panels_list.md deleted file mode 100644 index 30c300cd3..000000000 --- a/docs/data-sources/json_dcim_power_panels_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_dcim\_power\_panels\_list Data Source - -Get json output from the dcim_power_panels_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_dcim_power_panels_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_dcim_power_panels_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. - diff --git a/docs/data-sources/json_dcim_power_port_templates_list.md b/docs/data-sources/json_dcim_power_port_templates_list.md deleted file mode 100644 index 2e7b818d8..000000000 --- a/docs/data-sources/json_dcim_power_port_templates_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_dcim\_power\_port\_templates\_list Data Source - -Get json output from the dcim_power_port_templates_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_dcim_power_port_templates_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_dcim_power_port_templates_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. 
- diff --git a/docs/data-sources/json_dcim_power_ports_list.md b/docs/data-sources/json_dcim_power_ports_list.md deleted file mode 100644 index be4bbcdb2..000000000 --- a/docs/data-sources/json_dcim_power_ports_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_dcim\_power\_ports\_list Data Source - -Get json output from the dcim_power_ports_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_dcim_power_ports_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_dcim_power_ports_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. - diff --git a/docs/data-sources/json_dcim_rack_reservations_list.md b/docs/data-sources/json_dcim_rack_reservations_list.md deleted file mode 100644 index c1692eb3d..000000000 --- a/docs/data-sources/json_dcim_rack_reservations_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_dcim\_rack\_reservations\_list Data Source - -Get json output from the dcim_rack_reservations_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_dcim_rack_reservations_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_dcim_rack_reservations_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. 
- diff --git a/docs/data-sources/json_dcim_rack_roles_list.md b/docs/data-sources/json_dcim_rack_roles_list.md deleted file mode 100644 index 395354602..000000000 --- a/docs/data-sources/json_dcim_rack_roles_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_dcim\_rack\_roles\_list Data Source - -Get json output from the dcim_rack_roles_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_dcim_rack_roles_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_dcim_rack_roles_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. - diff --git a/docs/data-sources/json_dcim_racks_list.md b/docs/data-sources/json_dcim_racks_list.md deleted file mode 100644 index 73ed54e92..000000000 --- a/docs/data-sources/json_dcim_racks_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_dcim\_racks\_list Data Source - -Get json output from the dcim_racks_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_dcim_racks_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_dcim_racks_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. 
- diff --git a/docs/data-sources/json_dcim_rear_port_templates_list.md b/docs/data-sources/json_dcim_rear_port_templates_list.md deleted file mode 100644 index 93ec09668..000000000 --- a/docs/data-sources/json_dcim_rear_port_templates_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_dcim\_rear\_port\_templates\_list Data Source - -Get json output from the dcim_rear_port_templates_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_dcim_rear_port_templates_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_dcim_rear_port_templates_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. - diff --git a/docs/data-sources/json_dcim_rear_ports_list.md b/docs/data-sources/json_dcim_rear_ports_list.md deleted file mode 100644 index 7cff121b1..000000000 --- a/docs/data-sources/json_dcim_rear_ports_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_dcim\_rear\_ports\_list Data Source - -Get json output from the dcim_rear_ports_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_dcim_rear_ports_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_dcim_rear_ports_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. 
- diff --git a/docs/data-sources/json_dcim_regions_list.md b/docs/data-sources/json_dcim_regions_list.md deleted file mode 100644 index b769105fe..000000000 --- a/docs/data-sources/json_dcim_regions_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_dcim\_regions\_list Data Source - -Get json output from the dcim_regions_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_dcim_regions_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_dcim_regions_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. - diff --git a/docs/data-sources/json_dcim_site_groups_list.md b/docs/data-sources/json_dcim_site_groups_list.md deleted file mode 100644 index eb9c4513d..000000000 --- a/docs/data-sources/json_dcim_site_groups_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_dcim\_site\_groups\_list Data Source - -Get json output from the dcim_site_groups_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_dcim_site_groups_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_dcim_site_groups_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. 
- diff --git a/docs/data-sources/json_dcim_sites_list.md b/docs/data-sources/json_dcim_sites_list.md deleted file mode 100644 index ec2984ce8..000000000 --- a/docs/data-sources/json_dcim_sites_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_dcim\_sites\_list Data Source - -Get json output from the dcim_sites_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_dcim_sites_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_dcim_sites_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. - diff --git a/docs/data-sources/json_dcim_virtual_chassis_list.md b/docs/data-sources/json_dcim_virtual_chassis_list.md deleted file mode 100644 index 2d66c565a..000000000 --- a/docs/data-sources/json_dcim_virtual_chassis_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_dcim\_virtual\_chassis\_list Data Source - -Get json output from the dcim_virtual_chassis_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_dcim_virtual_chassis_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_dcim_virtual_chassis_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. 
- diff --git a/docs/data-sources/json_extras_config_contexts_list.md b/docs/data-sources/json_extras_config_contexts_list.md deleted file mode 100644 index e67e2e23c..000000000 --- a/docs/data-sources/json_extras_config_contexts_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_extras\_config\_contexts\_list Data Source - -Get json output from the extras_config_contexts_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_extras_config_contexts_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_extras_config_contexts_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. - diff --git a/docs/data-sources/json_extras_content_types_list.md b/docs/data-sources/json_extras_content_types_list.md deleted file mode 100644 index 2359b9c11..000000000 --- a/docs/data-sources/json_extras_content_types_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_extras\_content\_types\_list Data Source - -Get json output from the extras_content_types_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_extras_content_types_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_extras_content_types_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. 
- diff --git a/docs/data-sources/json_extras_custom_fields_list.md b/docs/data-sources/json_extras_custom_fields_list.md deleted file mode 100644 index 503066176..000000000 --- a/docs/data-sources/json_extras_custom_fields_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_extras\_custom\_fields\_list Data Source - -Get json output from the extras_custom_fields_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_extras_custom_fields_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_extras_custom_fields_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. - diff --git a/docs/data-sources/json_extras_custom_links_list.md b/docs/data-sources/json_extras_custom_links_list.md deleted file mode 100644 index 0b5f9ac0d..000000000 --- a/docs/data-sources/json_extras_custom_links_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_extras\_custom\_links\_list Data Source - -Get json output from the extras_custom_links_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_extras_custom_links_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_extras_custom_links_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. 
- diff --git a/docs/data-sources/json_extras_export_templates_list.md b/docs/data-sources/json_extras_export_templates_list.md deleted file mode 100644 index 4674f902a..000000000 --- a/docs/data-sources/json_extras_export_templates_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_extras\_export\_templates\_list Data Source - -Get json output from the extras_export_templates_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_extras_export_templates_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_extras_export_templates_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. - diff --git a/docs/data-sources/json_extras_image_attachments_list.md b/docs/data-sources/json_extras_image_attachments_list.md deleted file mode 100644 index 4d74b7e60..000000000 --- a/docs/data-sources/json_extras_image_attachments_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_extras\_image\_attachments\_list Data Source - -Get json output from the extras_image_attachments_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_extras_image_attachments_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_extras_image_attachments_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. 
- diff --git a/docs/data-sources/json_extras_job_results_list.md b/docs/data-sources/json_extras_job_results_list.md deleted file mode 100644 index 4f210aa30..000000000 --- a/docs/data-sources/json_extras_job_results_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_extras\_job\_results\_list Data Source - -Get json output from the extras_job_results_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_extras_job_results_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_extras_job_results_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. - diff --git a/docs/data-sources/json_extras_journal_entries_list.md b/docs/data-sources/json_extras_journal_entries_list.md deleted file mode 100644 index 2fa91907e..000000000 --- a/docs/data-sources/json_extras_journal_entries_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_extras\_journal\_entries\_list Data Source - -Get json output from the extras_journal_entries_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_extras_journal_entries_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_extras_journal_entries_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. 
- diff --git a/docs/data-sources/json_extras_object_changes_list.md b/docs/data-sources/json_extras_object_changes_list.md deleted file mode 100644 index 2d22db6aa..000000000 --- a/docs/data-sources/json_extras_object_changes_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_extras\_object\_changes\_list Data Source - -Get json output from the extras_object_changes_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_extras_object_changes_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_extras_object_changes_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. - diff --git a/docs/data-sources/json_extras_tags_list.md b/docs/data-sources/json_extras_tags_list.md deleted file mode 100644 index 1b214766f..000000000 --- a/docs/data-sources/json_extras_tags_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_extras\_tags\_list Data Source - -Get json output from the extras_tags_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_extras_tags_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_extras_tags_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. 
- diff --git a/docs/data-sources/json_extras_webhooks_list.md b/docs/data-sources/json_extras_webhooks_list.md deleted file mode 100644 index e963b185d..000000000 --- a/docs/data-sources/json_extras_webhooks_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_extras\_webhooks\_list Data Source - -Get json output from the extras_webhooks_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_extras_webhooks_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_extras_webhooks_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. - diff --git a/docs/data-sources/json_ipam_aggregates_list.md b/docs/data-sources/json_ipam_aggregates_list.md deleted file mode 100644 index 0bbb60caa..000000000 --- a/docs/data-sources/json_ipam_aggregates_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_ipam\_aggregates\_list Data Source - -Get json output from the ipam_aggregates_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_ipam_aggregates_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_ipam_aggregates_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. 
- diff --git a/docs/data-sources/json_ipam_asns_list.md b/docs/data-sources/json_ipam_asns_list.md deleted file mode 100644 index 37e82b3c4..000000000 --- a/docs/data-sources/json_ipam_asns_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_ipam\_asns\_list Data Source - -Get json output from the ipam_asns_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_ipam_asns_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_ipam_asns_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. - diff --git a/docs/data-sources/json_ipam_fhrp_group_assignments_list.md b/docs/data-sources/json_ipam_fhrp_group_assignments_list.md deleted file mode 100644 index 3d9c49a4b..000000000 --- a/docs/data-sources/json_ipam_fhrp_group_assignments_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_ipam\_fhrp\_group\_assignments\_list Data Source - -Get json output from the ipam_fhrp_group_assignments_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_ipam_fhrp_group_assignments_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_ipam_fhrp_group_assignments_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. 
- diff --git a/docs/data-sources/json_ipam_fhrp_groups_list.md b/docs/data-sources/json_ipam_fhrp_groups_list.md deleted file mode 100644 index f0ce1b838..000000000 --- a/docs/data-sources/json_ipam_fhrp_groups_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_ipam\_fhrp\_groups\_list Data Source - -Get json output from the ipam_fhrp_groups_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_ipam_fhrp_groups_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_ipam_fhrp_groups_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. - diff --git a/docs/data-sources/json_ipam_ip_addresses_list.md b/docs/data-sources/json_ipam_ip_addresses_list.md deleted file mode 100644 index f80d9d872..000000000 --- a/docs/data-sources/json_ipam_ip_addresses_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_ipam\_ip\_addresses\_list Data Source - -Get json output from the ipam_ip_addresses_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_ipam_ip_addresses_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_ipam_ip_addresses_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. 
- diff --git a/docs/data-sources/json_ipam_ip_ranges_list.md b/docs/data-sources/json_ipam_ip_ranges_list.md deleted file mode 100644 index 44d2d0352..000000000 --- a/docs/data-sources/json_ipam_ip_ranges_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_ipam\_ip\_ranges\_list Data Source - -Get json output from the ipam_ip_ranges_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_ipam_ip_ranges_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_ipam_ip_ranges_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. - diff --git a/docs/data-sources/json_ipam_prefixes_list.md b/docs/data-sources/json_ipam_prefixes_list.md deleted file mode 100644 index b9cb95e02..000000000 --- a/docs/data-sources/json_ipam_prefixes_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_ipam\_prefixes\_list Data Source - -Get json output from the ipam_prefixes_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_ipam_prefixes_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_ipam_prefixes_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. 
- diff --git a/docs/data-sources/json_ipam_rirs_list.md b/docs/data-sources/json_ipam_rirs_list.md deleted file mode 100644 index 8c681a6d2..000000000 --- a/docs/data-sources/json_ipam_rirs_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_ipam\_rirs\_list Data Source - -Get json output from the ipam_rirs_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_ipam_rirs_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_ipam_rirs_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. - diff --git a/docs/data-sources/json_ipam_roles_list.md b/docs/data-sources/json_ipam_roles_list.md deleted file mode 100644 index 9a230dee2..000000000 --- a/docs/data-sources/json_ipam_roles_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_ipam\_roles\_list Data Source - -Get json output from the ipam_roles_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_ipam_roles_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_ipam_roles_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. 
- diff --git a/docs/data-sources/json_ipam_route_targets_list.md b/docs/data-sources/json_ipam_route_targets_list.md deleted file mode 100644 index 4d53ae3f7..000000000 --- a/docs/data-sources/json_ipam_route_targets_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_ipam\_route\_targets\_list Data Source - -Get json output from the ipam_route_targets_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_ipam_route_targets_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_ipam_route_targets_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. - diff --git a/docs/data-sources/json_ipam_services_list.md b/docs/data-sources/json_ipam_services_list.md deleted file mode 100644 index 38824bb5e..000000000 --- a/docs/data-sources/json_ipam_services_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_ipam\_services\_list Data Source - -Get json output from the ipam_services_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_ipam_services_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_ipam_services_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. 
- diff --git a/docs/data-sources/json_ipam_vlan_groups_list.md b/docs/data-sources/json_ipam_vlan_groups_list.md deleted file mode 100644 index c298814cc..000000000 --- a/docs/data-sources/json_ipam_vlan_groups_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_ipam\_vlan\_groups\_list Data Source - -Get json output from the ipam_vlan_groups_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_ipam_vlan_groups_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_ipam_vlan_groups_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. - diff --git a/docs/data-sources/json_ipam_vlans_list.md b/docs/data-sources/json_ipam_vlans_list.md deleted file mode 100644 index b4a5a0b15..000000000 --- a/docs/data-sources/json_ipam_vlans_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_ipam\_vlans\_list Data Source - -Get json output from the ipam_vlans_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_ipam_vlans_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_ipam_vlans_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. 
- diff --git a/docs/data-sources/json_ipam_vrfs_list.md b/docs/data-sources/json_ipam_vrfs_list.md deleted file mode 100644 index 88fe3eac9..000000000 --- a/docs/data-sources/json_ipam_vrfs_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_ipam\_vrfs\_list Data Source - -Get json output from the ipam_vrfs_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_ipam_vrfs_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_ipam_vrfs_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. - diff --git a/docs/data-sources/json_tenancy_contact_assignments_list.md b/docs/data-sources/json_tenancy_contact_assignments_list.md deleted file mode 100644 index 9fb02ec39..000000000 --- a/docs/data-sources/json_tenancy_contact_assignments_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_tenancy\_contact\_assignments\_list Data Source - -Get json output from the tenancy_contact_assignments_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_tenancy_contact_assignments_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_tenancy_contact_assignments_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. 
- diff --git a/docs/data-sources/json_tenancy_contact_groups_list.md b/docs/data-sources/json_tenancy_contact_groups_list.md deleted file mode 100644 index 7a7c1ed04..000000000 --- a/docs/data-sources/json_tenancy_contact_groups_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_tenancy\_contact\_groups\_list Data Source - -Get json output from the tenancy_contact_groups_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_tenancy_contact_groups_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_tenancy_contact_groups_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. - diff --git a/docs/data-sources/json_tenancy_contact_roles_list.md b/docs/data-sources/json_tenancy_contact_roles_list.md deleted file mode 100644 index 39f943429..000000000 --- a/docs/data-sources/json_tenancy_contact_roles_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_tenancy\_contact\_roles\_list Data Source - -Get json output from the tenancy_contact_roles_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_tenancy_contact_roles_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_tenancy_contact_roles_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. 
- diff --git a/docs/data-sources/json_tenancy_contacts_list.md b/docs/data-sources/json_tenancy_contacts_list.md deleted file mode 100644 index 2422ba8ac..000000000 --- a/docs/data-sources/json_tenancy_contacts_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_tenancy\_contacts\_list Data Source - -Get json output from the tenancy_contacts_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_tenancy_contacts_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_tenancy_contacts_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. - diff --git a/docs/data-sources/json_tenancy_tenant_groups_list.md b/docs/data-sources/json_tenancy_tenant_groups_list.md deleted file mode 100644 index a0a3f9b05..000000000 --- a/docs/data-sources/json_tenancy_tenant_groups_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_tenancy\_tenant\_groups\_list Data Source - -Get json output from the tenancy_tenant_groups_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_tenancy_tenant_groups_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_tenancy_tenant_groups_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. 
- diff --git a/docs/data-sources/json_tenancy_tenants_list.md b/docs/data-sources/json_tenancy_tenants_list.md deleted file mode 100644 index 79b9d79ff..000000000 --- a/docs/data-sources/json_tenancy_tenants_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_tenancy\_tenants\_list Data Source - -Get json output from the tenancy_tenants_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_tenancy_tenants_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_tenancy_tenants_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. - diff --git a/docs/data-sources/json_users_groups_list.md b/docs/data-sources/json_users_groups_list.md deleted file mode 100644 index 8ba840f70..000000000 --- a/docs/data-sources/json_users_groups_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_users\_groups\_list Data Source - -Get json output from the users_groups_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_users_groups_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_users_groups_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. 
- diff --git a/docs/data-sources/json_users_permissions_list.md b/docs/data-sources/json_users_permissions_list.md deleted file mode 100644 index 148473b41..000000000 --- a/docs/data-sources/json_users_permissions_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_users\_permissions\_list Data Source - -Get json output from the users_permissions_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_users_permissions_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_users_permissions_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. - diff --git a/docs/data-sources/json_users_tokens_list.md b/docs/data-sources/json_users_tokens_list.md deleted file mode 100644 index c28f9c499..000000000 --- a/docs/data-sources/json_users_tokens_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_users\_tokens\_list Data Source - -Get json output from the users_tokens_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_users_tokens_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_users_tokens_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. 
- diff --git a/docs/data-sources/json_users_users_list.md b/docs/data-sources/json_users_users_list.md deleted file mode 100644 index 3a71ba233..000000000 --- a/docs/data-sources/json_users_users_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_users\_users\_list Data Source - -Get json output from the users_users_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_users_users_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_users_users_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. - diff --git a/docs/data-sources/json_virtualization_cluster_groups_list.md b/docs/data-sources/json_virtualization_cluster_groups_list.md deleted file mode 100644 index 9d120bbee..000000000 --- a/docs/data-sources/json_virtualization_cluster_groups_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_virtualization\_cluster\_groups\_list Data Source - -Get json output from the virtualization_cluster_groups_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_virtualization_cluster_groups_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_virtualization_cluster_groups_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. 
- diff --git a/docs/data-sources/json_virtualization_cluster_types_list.md b/docs/data-sources/json_virtualization_cluster_types_list.md deleted file mode 100644 index dae52c6e7..000000000 --- a/docs/data-sources/json_virtualization_cluster_types_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_virtualization\_cluster\_types\_list Data Source - -Get json output from the virtualization_cluster_types_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_virtualization_cluster_types_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_virtualization_cluster_types_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. - diff --git a/docs/data-sources/json_virtualization_clusters_list.md b/docs/data-sources/json_virtualization_clusters_list.md deleted file mode 100644 index 493f868ef..000000000 --- a/docs/data-sources/json_virtualization_clusters_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_virtualization\_clusters\_list Data Source - -Get json output from the virtualization_clusters_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_virtualization_clusters_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_virtualization_clusters_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. 
- diff --git a/docs/data-sources/json_virtualization_interfaces_list.md b/docs/data-sources/json_virtualization_interfaces_list.md deleted file mode 100644 index 900f99ffb..000000000 --- a/docs/data-sources/json_virtualization_interfaces_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_virtualization\_interfaces\_list Data Source - -Get json output from the virtualization_interfaces_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_virtualization_interfaces_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_virtualization_interfaces_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. - diff --git a/docs/data-sources/json_virtualization_virtual_machines_list.md b/docs/data-sources/json_virtualization_virtual_machines_list.md deleted file mode 100644 index 393cdd857..000000000 --- a/docs/data-sources/json_virtualization_virtual_machines_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_virtualization\_virtual\_machines\_list Data Source - -Get json output from the virtualization_virtual_machines_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_virtualization_virtual_machines_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_virtualization_virtual_machines_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. 
- diff --git a/docs/data-sources/json_wireless_wireless_lan_groups_list.md b/docs/data-sources/json_wireless_wireless_lan_groups_list.md deleted file mode 100644 index 0723d504f..000000000 --- a/docs/data-sources/json_wireless_wireless_lan_groups_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_wireless\_wireless\_lan\_groups\_list Data Source - -Get json output from the wireless_wireless_lan_groups_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_wireless_wireless_lan_groups_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_wireless_wireless_lan_groups_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. - diff --git a/docs/data-sources/json_wireless_wireless_lans_list.md b/docs/data-sources/json_wireless_wireless_lans_list.md deleted file mode 100644 index 7562e1339..000000000 --- a/docs/data-sources/json_wireless_wireless_lans_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_wireless\_wireless\_lans\_list Data Source - -Get json output from the wireless_wireless_lans_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_wireless_wireless_lans_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_wireless_wireless_lans_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. 
- diff --git a/docs/data-sources/json_wireless_wireless_links_list.md b/docs/data-sources/json_wireless_wireless_links_list.md deleted file mode 100644 index b2bf21372..000000000 --- a/docs/data-sources/json_wireless_wireless_links_list.md +++ /dev/null @@ -1,24 +0,0 @@ -# netbox\_json\_wireless\_wireless\_links\_list Data Source - -Get json output from the wireless_wireless_links_list Netbox endpoint - -## Example Usage - -```hcl -data "netbox_json_wireless_wireless_links_list" "test" { - limit = 0 -} -output "example" { - value = jsondecode(data.netbox_json_wireless_wireless_links_list.test.json) -} -``` - -## Argument Reference - -* ``limit`` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``json`` - JSON output of the list of objects for this Netbox endpoint. - diff --git a/docs/data-sources/tenancy_contact.md b/docs/data-sources/tenancy_contact.md deleted file mode 100644 index 7f85dcc19..000000000 --- a/docs/data-sources/tenancy_contact.md +++ /dev/null @@ -1,22 +0,0 @@ -# netbox\_tenancy\_contact Data Source - -Get info about tenancy contact from netbox. - -## Example Usage - -```hcl -data "netbox_tenancy_contact" "contact_test" { - name = "John Doe" -} -``` - -## Argument Reference - -The following arguments are supported: -* ``name`` - (Required) The name of the contact. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``id`` - The id (ref in Netbox) of this object. -* ``content_type`` - The content type of this object. 
diff --git a/docs/data-sources/tenancy_contact_group.md b/docs/data-sources/tenancy_contact_group.md deleted file mode 100644 index 1d1992b72..000000000 --- a/docs/data-sources/tenancy_contact_group.md +++ /dev/null @@ -1,22 +0,0 @@ -# netbox\_tenancy\_contact\_group Data Source - -Get info about tenancy contact groups from netbox. - -## Example Usage - -```hcl -data "netbox_tenancy_contact_group" "contact_group_test" { - slug = "TestContactGroup" -} -``` - -## Argument Reference - -The following arguments are supported: -* ``slug`` - (Required) The slug of the tenancy contact groups. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``id`` - The id (ref in Netbox) of this object. -* ``content_type`` - The content type of this object. diff --git a/docs/data-sources/tenancy_contact_role.md b/docs/data-sources/tenancy_contact_role.md deleted file mode 100644 index 1173b8b43..000000000 --- a/docs/data-sources/tenancy_contact_role.md +++ /dev/null @@ -1,22 +0,0 @@ -# netbox\_tenancy\_contact\_role Data Source - -Get info about tenancy contact roles from netbox. - -## Example Usage - -```hcl -data "netbox_tenancy_contact_role" "contact_role_test" { - slug = "TestContactGroup" -} -``` - -## Argument Reference - -The following arguments are supported: -* ``slug`` - (Required) The slug of the tenancy contact roles. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``id`` - The id (ref in Netbox) of this object. -* ``content_type`` - The content type of this object. diff --git a/docs/data-sources/tenancy_tenant.md b/docs/data-sources/tenancy_tenant.md deleted file mode 100644 index cb815e8c0..000000000 --- a/docs/data-sources/tenancy_tenant.md +++ /dev/null @@ -1,22 +0,0 @@ -# netbox\_tenancy\_tenant Data Source - -Get info about tenancy tenant from netbox. 
- -## Example Usage - -```hcl -data "netbox_tenancy_tenant" "tenant_test" { - slug = "TestTenant" -} -``` - -## Argument Reference - -The following arguments are supported: -* ``slug`` - (Required) The slug of the tenant. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``id`` - The id (ref in Netbox) of this object. -* ``content_type`` - The content type of this object. diff --git a/docs/data-sources/tenancy_tenant_group.md b/docs/data-sources/tenancy_tenant_group.md deleted file mode 100644 index fc9b7cae0..000000000 --- a/docs/data-sources/tenancy_tenant_group.md +++ /dev/null @@ -1,22 +0,0 @@ -# netbox\_tenancy\_tenant\_group Data Source - -Get info about tenancy tenant groups from netbox. - -## Example Usage - -```hcl -data "netbox_tenancy_tenant_group" "tenant_group_test" { - slug = "TestTenantGroup" -} -``` - -## Argument Reference - -The following arguments are supported: -* ``slug`` - (Required) The slug of the tenancy tenant groups. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``id`` - The id (ref in Netbox) of this object. -* ``content_type`` - The content type of this object. diff --git a/docs/data-sources/virtualization_cluster.md b/docs/data-sources/virtualization_cluster.md deleted file mode 100644 index b70acfefe..000000000 --- a/docs/data-sources/virtualization_cluster.md +++ /dev/null @@ -1,22 +0,0 @@ -# netbox\_virtualization\_cluster Data Source - -Get info about vitualization cluster from netbox. - -## Example Usage - -```hcl -data "netbox_virtualization_cluster" "cluster_test" { - name = "TestCluster" -} -``` - -## Argument Reference - -The following arguments are supported: -* ``name`` - (Required) The name of the cluster. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``id`` - The id (ref in Netbox) of this object. 
-* ``content_type`` - The content type of this object. diff --git a/docs/index.md b/docs/index.md deleted file mode 100644 index 512f5fb0e..000000000 --- a/docs/index.md +++ /dev/null @@ -1,52 +0,0 @@ -# terraform-provider-netbox Provider - -Terraform provider for [Netbox.](https://netbox.readthedocs.io/en/stable/) - -## Compatibility with Netbox - -| Netbox version | Provider version | -|:--------------:|:----------------:| -| 2.8 | 0.x.y | -| 2.9 | 1.x.y | -| 2.11 | 2.x.y | -| 3.0 | 3.x.y | -| 3.1 | 4.x.y | - -## Example Usage - -```hcl -terraform { - required_version = ">= 0.14.0" - required_providers { - netbox = { - source = "smutel/netbox" - version = "~> 4.0.0" - } - } -} - -provider netbox { - # Environment variable NETBOX_URL - url = "127.0.0.1:8000" - - # Environment variable NETBOX_BASEPATH - basepath = "/api" - - # Environment variable NETBOX_TOKEN - token = "0123456789abcdef0123456789abcdef01234567" - - # Environment variable NETBOX_SCHEME - scheme = "http" - - # Environment variable NETBOX_INSECURE - insecure = "true" -} -``` - -## Argument Reference - -* `url` or `NETBOX_URL` environment variable to define the URL and the port (127.0.0.1:8000 by default) -* `basepath` or `NETBOX_BASEPATH` environment variable to define the base path (/api) -* `token` or `NETBOX_TOKEN` environment variable to define the TOKEN to access the application (empty by default) -* `scheme` or `NETBOX_SCHEME` environment variable to define the SCHEME of the URL (https by default) -* `insecure` or `NETBOX_INSECURE` environment variable to skip or not the TLS certificat validation (false by default) diff --git a/docs/resources/ipam_aggregate.md b/docs/resources/ipam_aggregate.md deleted file mode 100644 index adb1a0ba0..000000000 --- a/docs/resources/ipam_aggregate.md +++ /dev/null @@ -1,91 +0,0 @@ -# netbox\_ipam\_aggregate Resource - -Manage an aggregate within Netbox. 
- -## Example Usage - -```hcl -resource "netbox_ipam_aggregate" "aggregate_test" { - prefix = "192.168.56.0/24" - rir_id = 1 - date_created = "2020-12-21" - - tag { - name = "tag1" - slug = "tag1" - } - - custom_field { - name = "cf_boolean" - type = "boolean" - value = "true" - } - - custom_field { - name = "cf_date" - type = "date" - value = "2020-12-25" - } - - custom_field { - name = "cf_text" - type = "text" - value = "some text" - } - - custom_field { - name = "cf_integer" - type = "integer" - value = "10" - } - - custom_field { - name = "cf_selection" - type = "selection" - value = "1" - } - - custom_field { - name = "cf_url" - type = "url" - value = "https://github.com" - } - - custom_field { - name = "cf_multiple_selection" - type = "multiple" - value = "0,1" - } -} -``` - -## Argument Reference - -The following arguments are supported: -* ``date_added`` - (Optional) Date when this aggregate was added. Format *YYYY-MM-DD*. -* ``description`` - (Optional) The description of this object. -* ``prefix`` - (Required) The prefix (with mask) used for this object. -* ``rir_id`` - (Required) The RIR id linked to this object. - -The ``custom_field`` block (optional) supports: -* ``name`` - (Required) Name of the existing custom resource to associate with this resource. -* ``type`` - (Required) Type of the existing custom resource to associate with this resource (text, integer, boolean, url, selection, multiple). -* ``value`` - (Required) Value of the existing custom resource to associate with this resource. - -The ``tag`` block (optional) supports: -* ``name`` - (Required) Name of the existing tag to associate with this resource. -* ``slug`` - (Required) Slug of the existing tag to associate with this resource. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``id`` - The id (ref in Netbox) of this object. -* ``content_type`` - The content type of this object. 
- -## Import - -Aggregates can be imported by `id` e.g. - -``` -$ terraform import netbox_ipam_aggregate.aggregate_test id -``` diff --git a/docs/resources/ipam_ip_addresses.md b/docs/resources/ipam_ip_addresses.md deleted file mode 100644 index a2b7f18b9..000000000 --- a/docs/resources/ipam_ip_addresses.md +++ /dev/null @@ -1,99 +0,0 @@ -# netbox\_ipam\_ip\_addresses Resource - -Manage an ip address within Netbox. - -## Example Usage - -```hcl -resource "netbox_ipam_ip_addresses" "ip_test" { - address = "192.168.56.0/24" - description = "IP created by terraform" - status = "active" - - tag { - name = "tag1" - slug = "tag1" - } - - custom_field { - name = "cf_boolean" - type = "boolean" - value = "true" - } - - custom_field { - name = "cf_date" - type = "date" - value = "2020-12-25" - } - - custom_field { - name = "cf_text" - type = "text" - value = "some text" - } - - custom_field { - name = "cf_integer" - type = "integer" - value = "10" - } - - custom_field { - name = "cf_selection" - type = "selection" - value = "1" - } - - custom_field { - name = "cf_url" - type = "url" - value = "https://github.com" - } - - custom_field { - name = "cf_multiple_selection" - type = "multiple" - value = "0,1" - } -} -``` - -## Argument Reference - -The following arguments are supported: -* ``address`` - (Required) The IP address (with mask) used for this object. -* ``description`` - (Optional) The description of this object. -* ``dns_name`` - (Optional) The DNS name of this object. -* ``nat_inside_id`` - (Optional) The ID of the NAT inside of this object. -* ``object_id`` - (Optional) The ID of the object where this resource is attached to. -* ``object_type`` - (Optional) The object type among virtualization.vminterface -or dcim.interface (empty by default) -* ``primary_ip4`` - (Optional) Set this resource as primary IPv4 (false by default) -* ``role`` - (Optional) The role among loopback, secondary, anycast, vip, vrrp, hsrp, glbp, carp of this object. 
-* ``status`` - (Optional) The status among container, active, reserved, deprecated (active by default). -* ``tenant_id`` - (Optional) ID of the tenant where this object is attached. -* ``vrf_id`` - (Optional) The ID of the vrf attached to this object. - -The ``custom_field`` block (optional) supports: -* ``name`` - (Required) Name of the existing custom resource to associate with this resource. -* ``type`` - (Required) Type of the existing custom resource to associate with this resource (text, integer, boolean, url, selection, multiple). -* ``value`` - (Required) Value of the existing custom resource to associate with this resource. - -The ``tag`` block (optional) supports: -* ``name`` - (Required) Name of the existing tag to associate with this resource. -* ``slug`` - (Required) Slug of the existing tag to associate with this resource. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``id`` - The id (ref in Netbox) of this object. -* ``content_type`` - The content type of this object. - -## Import - -Ip addresses can be imported by `id` e.g. - -``` -$ terraform import netbox_ipam_ip_addresses.ip_test id -``` diff --git a/docs/resources/ipam_prefix.md b/docs/resources/ipam_prefix.md deleted file mode 100644 index a9fc97930..000000000 --- a/docs/resources/ipam_prefix.md +++ /dev/null @@ -1,99 +0,0 @@ -# netbox\_ipam\_prefix Resource - -Manage a prefix within Netbox. 
- -## Example Usage - -```hcl -resource "netbox_ipam_prefix" "prefix_test" { - prefix = "192.168.56.0/24" - vlan_id = netbox_ipam_vlan.vlan_test.id - description = "Prefix created by terraform" - site_id = netbox_ipam_vlan_group.vlan_group_test.site_id - role_id = data.netbox_ipam_roles.vlan_role_production.id - status = "active" - - tag { - name = "tag1" - slug = "tag1" - } - - custom_field { - name = "cf_boolean" - type = "boolean" - value = "true" - } - - custom_field { - name = "cf_date" - type = "date" - value = "2020-12-25" - } - - custom_field { - name = "cf_text" - type = "text" - value = "some text" - } - - custom_field { - name = "cf_integer" - type = "integer" - value = "10" - } - - custom_field { - name = "cf_selection" - type = "selection" - value = "1" - } - - custom_field { - name = "cf_url" - type = "url" - value = "https://github.com" - } - - custom_field { - name = "cf_multiple_selection" - type = "multiple" - value = "0,1" - } -} -``` - -## Argument Reference - -The following arguments are supported: -* ``description`` - (Optional) The description of this object. -* ``is_pool`` - (Optional) Define if this object is a pool (false by default). -* ``prefix`` - (Required) The prefix (IP address/mask) used for this object. -* ``role_id`` - (Optional) The ID of the role attached to this object. -* ``site_id`` - (Optional) ID of the site where this object is created -* ``status`` - (Optional) The status among container, active, reserved, deprecated (active by default). -* ``tenant_id`` - (Optional) ID of the tenant where this object is attached. -* ``vlan_id`` - (Optional) ID of the vlan where this object is attached. -* ``vrf_id`` - (Optional) The ID of the vrf attached to this object. - -The ``custom_field`` block (optional) supports: -* ``name`` - (Required) Name of the existing custom resource to associate with this resource. 
-* ``type`` - (Required) Type of the existing custom resource to associate with this resource (text, integer, boolean, url, selection, multiple). -* ``value`` - (Required) Value of the existing custom resource to associate with this resource. - -The ``tag`` block supports: -* ``name`` - (Required) Name of the existing tag to associate with this resource. -* ``slug`` - (Required) Slug of the existing tag to associate with this resource. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``id`` - The id (ref in Netbox) of this object. -* ``content_type`` - The content type of this object. - -## Import - -Prefixes can be imported by `id` e.g. - -``` -$ terraform import netbox_ipam_prefix.prefix_test id -``` diff --git a/docs/resources/ipam_service.md b/docs/resources/ipam_service.md deleted file mode 100644 index 6d695e528..000000000 --- a/docs/resources/ipam_service.md +++ /dev/null @@ -1,97 +0,0 @@ -# netbox\_ipam\_service Resource - -Manage a service within Netbox. 
- -## Example Usage - -```hcl -resource "netbox_ipam_service" "service_test" { - name = "SMTP" - virtualmachine_id = netbox_virtualization_vm.vm_test.id - ip_addresses_id = [netbox_ipam_ip_addresses.ip_test.id] - ports = ["22"] - protocol = "tcp" - description = "Service created by terraform" - - tag { - name = "tag1" - slug = "tag1" - } - - custom_field { - name = "cf_boolean" - type = "boolean" - value = "true" - } - - custom_field { - name = "cf_date" - type = "date" - value = "2020-12-25" - } - - custom_field { - name = "cf_text" - type = "text" - value = "some text" - } - - custom_field { - name = "cf_integer" - type = "integer" - value = "10" - } - - custom_field { - name = "cf_selection" - type = "selection" - value = "1" - } - - custom_field { - name = "cf_url" - type = "url" - value = "https://github.com" - } - - custom_field { - name = "cf_multiple_selection" - type = "multiple" - value = "0,1" - } -} -``` - -## Argument Reference - -The following arguments are supported: -* ``description`` - (Optional) The description of this object. -* ``device_id`` - (Optional) The ID of the device linked to this object. -* ``ip_addresses_id`` - (Optional) Array of ID of the IP addresses attached to this object. -* ``name`` - (Required) The name for this object. -* ``ports`` - (Optional) Array of ports of this object. -* ``protocol`` - (Required) The protocol of this object (tcp or udp). -* ``virtualmachine_id`` - (Optional) The ID of the vm linked to this object. - -The ``custom_field`` block (optional) supports: -* ``name`` - (Required) Name of the existing custom resource to associate with this resource. -* ``type`` - (Required) Type of the existing custom resource to associate with this resource (text, integer, boolean, url, selection, multiple). -* ``value`` - (Required) Value of the existing custom resource to associate with this resource. - -The ``tag`` block (optional) supports: -* ``name`` - (Required) Name of the existing tag to associate with this resource. 
-* ``slug`` - (Required) Slug of the existing tag to associate with this resource. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``id`` - The id (ref in Netbox) of this object. -* ``content_type`` - The content type of this object. - -## Import - -Services can be imported by `id` e.g. - -``` -$ terraform import netbox_ipam_service.service_test id -``` diff --git a/docs/resources/ipam_vlan.md b/docs/resources/ipam_vlan.md deleted file mode 100644 index fe1b30085..000000000 --- a/docs/resources/ipam_vlan.md +++ /dev/null @@ -1,99 +0,0 @@ -# netbox\_ipam\_vlan Resource - -Manage a vlan within Netbox. - -## Example Usage - -```hcl -resource "netbox_ipam_vlan" "vlan_test" { - vlan_id = 100 - name = "TestVlan" - site_id = netbox_ipam_vlan_group.vlan_group_test.site_id - description = "VLAN created by terraform" - vlan_group_id = netbox_ipam_vlan_group.vlan_group_test.id - tenant_id = netbox_tenancy_tenant.tenant_test.id - role_id = data.netbox_ipam_roles.vlan_role_production.id - - tag { - name = "tag1" - slug = "tag1" - } - - custom_field { - name = "cf_boolean" - type = "boolean" - value = "true" - } - - custom_field { - name = "cf_date" - type = "date" - value = "2020-12-25" - } - - custom_field { - name = "cf_text" - type = "text" - value = "some text" - } - - custom_field { - name = "cf_integer" - type = "integer" - value = "10" - } - - custom_field { - name = "cf_selection" - type = "selection" - value = "1" - } - - custom_field { - name = "cf_url" - type = "url" - value = "https://github.com" - } - - custom_field { - name = "cf_multiple_selection" - type = "multiple" - value = "0,1" - } -} -``` - -## Argument Reference - -The following arguments are supported: -* ``description`` - (Optional) The description of this object. -* ``vlan_group_id`` - (Optional) ID of the group where this object belongs to. -* ``name`` - (Required) The name for this object. 
-* ``role_id`` - (Optional) The ID of the role attached to this object. -* ``site_id`` - (Optional) ID of the site where this object is created. -* ``status`` - (Optional) The status among container, active, reserved, deprecated (active by default). -* ``tenant_id`` - (Optional) ID of the tenant where this object is attached. -* ``vlan_id`` - (Required) The ID of this vlan (vlan tag). - -The ``custom_field`` block (optional) supports: -* ``name`` - (Required) Name of the existing custom resource to associate with this resource. -* ``type`` - (Required) Type of the existing custom resource to associate with this resource (text, integer, boolean, url, selection, multiple). -* ``value`` - (Required) Value of the existing custom resource to associate with this resource. - -The ``tag`` block supports: -* ``name`` - (Required) Name of the existing tag to associate with this resource. -* ``slug`` - (Required) Slug of the existing tag to associate with this resource. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``id`` - The id (ref in Netbox) of this object. -* ``content_type`` - The content type of this object. - -## Import - -Vlans can be imported by `id` e.g. - -``` -$ terraform import netbox_ipam_vlan.vlan_test id -``` diff --git a/docs/resources/ipam_vlan_group.md b/docs/resources/ipam_vlan_group.md deleted file mode 100644 index 49f0d8205..000000000 --- a/docs/resources/ipam_vlan_group.md +++ /dev/null @@ -1,41 +0,0 @@ -# netbox\_ipam\_vlan\_group Resource - -Manage a vlan group within Netbox. - -## Example Usage - -```hcl -resource "netbox_ipam_vlan_group" "vlan_group_test" { - name = "TestVlanGroup" - slug = "TestVlanGroup" - - tag { - name = "tag1" - slug = "tag1" - } -} -``` - -## Argument Reference - -The following arguments are supported: -* ``name`` - (Required) The name for this object. -* ``slug`` - (Required) The slug for this object. 
- -The ``tag`` block (optional) supports: -* ``name`` - (Required) Name of the existing tag to associate with this resource. -* ``slug`` - (Required) Slug of the existing tag to associate with this resource. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``id`` - The id (ref in Netbox) of this object. -* ``content_type`` - The content type of this object. - -## Import - -Vlan groups can be imported by `id` e.g. - -``` -$ terraform import netbox_ipam_vlan_group.vlan_group_test id -``` diff --git a/docs/resources/tenancy_contact.md b/docs/resources/tenancy_contact.md deleted file mode 100644 index 57662691d..000000000 --- a/docs/resources/tenancy_contact.md +++ /dev/null @@ -1,98 +0,0 @@ -# netbox\_tenancy\_contact Resource - -Manage a contact within Netbox. - -## Example Usage - -```hcl -resource "netbox_tenancy_contact" "contact_test" { - name = "John Doe" - title = "Someone in the world" - phone = "+330123456789" - email = "john.doe@unknown.com" - address = "Somewhere in the world" - comments = "Good contact" - contact_group_id = netbox_tenancy_contact_group.contact_group_02.id - - tag { - name = "tag1" - slug = "tag1" - } - - custom_field { - name = "cf_boolean" - type = "boolean" - value = "true" - } - - custom_field { - name = "cf_date" - type = "date" - value = "2020-12-25" - } - - custom_field { - name = "cf_text" - type = "text" - value = "some text" - } - - custom_field { - name = "cf_integer" - type = "integer" - value = "10" - } - - custom_field { - name = "cf_selection" - type = "selection" - value = "1" - } - - custom_field { - name = "cf_url" - type = "url" - value = "https://github.com" - } - - custom_field { - name = "cf_multiple_selection" - type = "multiple" - value = "0,1" - } -} -``` - -## Argument Reference - -The following arguments are supported: -* ``name`` - (Required) The name for this object. -* ``title`` - (Optional) The title for this object. 
-* ``phone`` - (Optional) The phone for this object. -* ``email`` - (Optional) The e-mail for this object. -* ``address`` - (Optional) The address for this object. -* ``comments`` - (Optional) Comments for this object. -* ``contact_group_id`` - (Optional) ID of the group where this object belongs to. - -The ``custom_field`` block (optional) supports: -* ``name`` - (Required) Name of the existing custom resource to associate with this resource. -* ``type`` - (Required) Type of the existing custom resource to associate with this resource (text, integer, boolean, url, selection, multiple). -* ``value`` - (Required) Value of the existing custom resource to associate with this resource. - -The ``tag`` block (optional) supports: -* ``name`` - (Required) Name of the existing tag to associate with this resource. -* ``slug`` - (Required) Slug of the existing tag to associate with this resource. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``id`` - The id (ref in Netbox) of this object. -* ``content_type`` - The content type of this object. - -## Import - -Tenants can be imported by `id` e.g. - -``` -$ terraform import netbox_tenancy_contact.contact_test id -``` diff --git a/docs/resources/tenancy_contact_assignment.md b/docs/resources/tenancy_contact_assignment.md deleted file mode 100644 index 4d6b8182a..000000000 --- a/docs/resources/tenancy_contact_assignment.md +++ /dev/null @@ -1,37 +0,0 @@ -# netbox\_tenancy\_contact\_assignment Resource - -Link a contact to another resource within Netbox. 
- -## Example Usage - -```hcl -resource "netbox_tenancy_contact_assignment" "contact_assignment_01" { - contact_id = netbox_tenancy_contact.contact.id - contact_role_id = netbox_tenancy_contact_role.contact_role_02.id - content_type = netbox_virtualization_vm.vm_test.content_type - object_id = netbox_virtualization_vm.vm_test.id - priority = "primary" -} -``` - -## Argument Reference - -The following arguments are supported: -* ``contact_id`` - (Required) ID of the contact to link to a resource. -* ``contact_role_id`` - (Required) The role of the contact for this resource. -* ``content_type`` - (Required) Type of the object where the contact will be linked. -* ``object_id`` - (Required) ID of the object where the contact will be linked. -* ``priority`` - (Required) Priority of this contact among primary, secondary and tertiary (primary by default). - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``id`` - The id (ref in Netbox) of this object. - -## Import - -Contact assignments can be imported by `id` e.g. - -``` -$ terraform import netbox_tenancy_contact_assignment.contact_assignment_test id -``` diff --git a/docs/resources/tenancy_contact_group.md b/docs/resources/tenancy_contact_group.md deleted file mode 100644 index 5b83c7141..000000000 --- a/docs/resources/tenancy_contact_group.md +++ /dev/null @@ -1,92 +0,0 @@ -# netbox\_tenancy\_contact\_group Resource - -Manage a contact group within Netbox. 
- -## Example Usage - -```hcl -resource "netbox_tenancy_contact_group" "contact_group_test" { - description = "Contact group created by terraform" - name = "TestContactGroup" - parent_id = 10 - slug = "TestContactGroup" - - tag { - name = "tag1" - slug = "tag1" - } - - custom_field { - name = "cf_boolean" - type = "boolean" - value = "true" - } - - custom_field { - name = "cf_date" - type = "date" - value = "2020-12-25" - } - - custom_field { - name = "cf_text" - type = "text" - value = "some text" - } - - custom_field { - name = "cf_integer" - type = "integer" - value = "10" - } - - custom_field { - name = "cf_selection" - type = "selection" - value = "1" - } - - custom_field { - name = "cf_url" - type = "url" - value = "https://github.com" - } - - custom_field { - name = "cf_multiple_selection" - type = "multiple" - value = "0,1" - } -} -``` - -## Argument Reference - -The following arguments are supported: -* ``description`` - (Optional) Description for this object. -* ``name`` - (Required) The name for this object. -* ``parent_id`` - (Optional) ID of the parent. -* ``slug`` - (Required) The slug for this object. - -The ``custom_field`` block (optional) supports: -* ``name`` - (Required) Name of the existing custom resource to associate with this resource. -* ``type`` - (Required) Type of the existing custom resource to associate with this resource (text, integer, boolean, url, selection, multiple). -* ``value`` - (Required) Value of the existing custom resource to associate with this resource. - -The ``tag`` block (optional) supports: -* ``name`` - (Required) Name of the existing tag to associate with this resource. -* ``slug`` - (Required) Slug of the existing tag to associate with this resource. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``id`` - The id (ref in Netbox) of this object. -* ``content_type`` - The content type of this object. 
- -## Import - -Contact groups can be imported by `id` e.g. - -``` -$ terraform import netbox_tenancy_contact_group.contact_group_test id -``` diff --git a/docs/resources/tenancy_contact_role.md b/docs/resources/tenancy_contact_role.md deleted file mode 100644 index 17579ed60..000000000 --- a/docs/resources/tenancy_contact_role.md +++ /dev/null @@ -1,90 +0,0 @@ -# netbox\_tenancy\_contact\_role Resource - -Manage a contact role within Netbox. - -## Example Usage - -```hcl -resource "netbox_tenancy_contact_role" "contact_role_test" { - description = "Contact role created by terraform" - name = "TestContactRole" - slug = "TestContactRole" - - tag { - name = "tag1" - slug = "tag1" - } - - custom_field { - name = "cf_boolean" - type = "boolean" - value = "true" - } - - custom_field { - name = "cf_date" - type = "date" - value = "2020-12-25" - } - - custom_field { - name = "cf_text" - type = "text" - value = "some text" - } - - custom_field { - name = "cf_integer" - type = "integer" - value = "10" - } - - custom_field { - name = "cf_selection" - type = "selection" - value = "1" - } - - custom_field { - name = "cf_url" - type = "url" - value = "https://github.com" - } - - custom_field { - name = "cf_multiple_selection" - type = "multiple" - value = "0,1" - } -} -``` - -## Argument Reference - -The following arguments are supported: -* ``description`` - (Optional) Description for this object. -* ``name`` - (Required) The name for this object. -* ``slug`` - (Required) The slug for this object. - -The ``custom_field`` block (optional) supports: -* ``name`` - (Required) Name of the existing custom resource to associate with this resource. -* ``type`` - (Required) Type of the existing custom resource to associate with this resource (text, integer, boolean, url, selection, multiple). -* ``value`` - (Required) Value of the existing custom resource to associate with this resource. 
- -The ``tag`` block (optional) supports: -* ``name`` - (Required) Name of the existing tag to associate with this resource. -* ``slug`` - (Required) Slug of the existing tag to associate with this resource. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``id`` - The id (ref in Netbox) of this object. -* ``content_type`` - The content type of this object. - -## Import - -Contact roles can be imported by `id` e.g. - -``` -$ terraform import netbox_tenancy_contact_role.contact_role_test id -``` diff --git a/docs/resources/tenancy_tenant.md b/docs/resources/tenancy_tenant.md deleted file mode 100644 index 616a7a878..000000000 --- a/docs/resources/tenancy_tenant.md +++ /dev/null @@ -1,94 +0,0 @@ -# netbox\_tenancy\_tenant Resource - -Manage a tenant within Netbox. - -## Example Usage - -```hcl -resource "netbox_tenancy_tenant" "tenant_test" { - name = "TestTenant" - slug = "TestTenant" - description = "Tenant created by terraform" - comments = "Some test comments" - tenant_group_id = netbox_tenancy_tenant_group.tenant_group_test.id - - tag { - name = "tag1" - slug = "tag1" - } - - custom_field { - name = "cf_boolean" - type = "boolean" - value = "true" - } - - custom_field { - name = "cf_date" - type = "date" - value = "2020-12-25" - } - - custom_field { - name = "cf_text" - type = "text" - value = "some text" - } - - custom_field { - name = "cf_integer" - type = "integer" - value = "10" - } - - custom_field { - name = "cf_selection" - type = "selection" - value = "1" - } - - custom_field { - name = "cf_url" - type = "url" - value = "https://github.com" - } - - custom_field { - name = "cf_multiple_selection" - type = "multiple" - value = "0,1" - } -} -``` - -## Argument Reference - -The following arguments are supported: -* ``comments`` - (Optional) Comments for this object. -* ``description`` - (Optional) The description for this object. 
-* ``tenant_group_id`` - (Optional) ID of the group where this object is located. -* ``name`` - (Required) The name for this object. -* ``slug`` - (Required) The slug for this object. - -The ``custom_field`` block (optional) supports: -* ``name`` - (Required) Name of the existing custom resource to associate with this resource. -* ``type`` - (Required) Type of the existing custom resource to associate with this resource (text, integer, boolean, url, selection, multiple). -* ``value`` - (Required) Value of the existing custom resource to associate with this resource. - -The ``tag`` block (optional) supports: -* ``name`` - (Required) Name of the existing tag to associate with this resource. -* ``slug`` - (Required) Slug of the existing tag to associate with this resource. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``id`` - The id (ref in Netbox) of this object. -* ``content_type`` - The content type of this object. - -## Import - -Tenants can be imported by `id` e.g. - -``` -$ terraform import netbox_tenancy_tenant.tenant_test id -``` diff --git a/docs/resources/tenancy_tenant_group.md b/docs/resources/tenancy_tenant_group.md deleted file mode 100644 index 86e52a31b..000000000 --- a/docs/resources/tenancy_tenant_group.md +++ /dev/null @@ -1,41 +0,0 @@ -# netbox\_tenancy\_tenant\_group Resource - -Manage a tenant group within Netbox. - -## Example Usage - -```hcl -resource "netbox_tenancy_tenant_group" "tenant_group_test" { - name = "TestTenantGroup" - slug = "TestTenantGroup" - - tag { - name = "tag1" - slug = "tag1" - } -} -``` - -## Argument Reference - -The following arguments are supported: -* ``name`` - (Required) The name for this object. -* ``slug`` - (Required) The slug for this object. - -The ``tag`` block (optional) supports: -* ``name`` - (Required) Name of the existing tag to associate with this resource. -* ``slug`` - (Required) Slug of the existing tag to associate with this resource. 
- -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``id`` - The id (ref in Netbox) of this object. -* ``content_type`` - The content type of this object. - -## Import - -Tenant groups can be imported by `id` e.g. - -``` -$ terraform import netbox_tenancy_tenant_group.tenant_group_test id -``` diff --git a/docs/resources/virtualization_interface.md b/docs/resources/virtualization_interface.md deleted file mode 100644 index 4df88c304..000000000 --- a/docs/resources/virtualization_interface.md +++ /dev/null @@ -1,93 +0,0 @@ -# netbox\_virtualization\_interface Resource - -Manage an interface resource within Netbox. - -## Example Usage - -```hcl -resource "netbox_virtualization_interface" "interface_test" { - name = "default" - virtualmachine_id = netbox_virtualization_vm.vm_test.id - mac_address = "AA:AA:AA:AA:AA:AA" - mtu = 1500 - description = "Interface de test" - - custom_field { - name = "cf_boolean" - type = "boolean" - value = "true" - } - - custom_field { - name = "cf_date" - type = "date" - value = "2020-12-25" - } - - custom_field { - name = "cf_text" - type = "text" - value = "some text" - } - - custom_field { - name = "cf_integer" - type = "integer" - value = "10" - } - - custom_field { - name = "cf_selection" - type = "selection" - value = "1" - } - - custom_field { - name = "cf_url" - type = "url" - value = "https://github.com" - } - - custom_field { - name = "cf_multiple_selection" - type = "multiple" - value = "0,1" - } -} -``` - -## Argument Reference - -The following arguments are supported: -* ``description`` - (Optional) Description for this object. -* ``enabled`` - (Optional) true or false (true by default). -* ``mac_address`` - (Optional) Mac address for this object. -* ``mode`` - (Optional) The mode among access, tagged, tagged-all. -* ``mtu`` - (Optional) The MTU between 1 and 65536 for this object. -* ``name`` - (Required) The name for this object. 
-* ``tagged_vlans`` - (Optional) List of vlan id tagged for this interface -* ``untagged_vlans`` - (Optional) Vlan id untagged for this interface -* ``virtualmachine_id`` - (Required) ID of the virtual machine where this object -is attached -The ``tag`` block supports: -* ``name`` - (Required) Name of the existing tag to associate with this resource. -* ``slug`` - (Required) Slug of the existing tag to associate with this resource. - -The ``custom_field`` block (optional) supports: -* ``name`` - (Required) Name of the existing custom resource to associate with this resource. -* ``type`` - (Required) Type of the existing custom resource to associate with this resource (text, integer, boolean, url, selection, multiple). -* ``value`` - (Required) Value of the existing custom resource to associate with this resource. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``id`` - The id (ref in Netbox) of this object. -* ``content_type`` - The content type of this object. - -## Import - -Virtualization interfaces can be imported by `id` e.g. - -``` -$ terraform import netbox_virtualization_interface.interface_test id -``` diff --git a/docs/resources/virtualization_vm.md b/docs/resources/virtualization_vm.md deleted file mode 100644 index 4fbe017bc..000000000 --- a/docs/resources/virtualization_vm.md +++ /dev/null @@ -1,107 +0,0 @@ -# netbox\_virtualization\_vm Resource - -Manage a virtual machine resource within Netbox. 
- -## Example Usage - -```hcl -resource "netbox_virtualization_vm" "vm_test" { - name = "TestVm" - comments = "VM created by terraform" - vcpus = "2.00" - disk = 50 - memory = 16 - cluster_id = 1 - local_context_data = jsonencode( - { - hello = "world" - number = 1 - } - ) - - tag { - name = "tag1" - slug = "tag1" - } - - custom_field { - name = "cf_boolean" - type = "boolean" - value = "true" - } - - custom_field { - name = "cf_date" - type = "date" - value = "2020-12-25" - } - - custom_field { - name = "cf_text" - type = "text" - value = "some text" - } - - custom_field { - name = "cf_integer" - type = "integer" - value = "10" - } - - custom_field { - name = "cf_selection" - type = "selection" - value = "1" - } - - custom_field { - name = "cf_url" - type = "url" - value = "https://github.com" - } - - custom_field { - name = "cf_multiple_selection" - type = "multiple" - value = "0,1" - } -} -``` - -## Argument Reference - -The following arguments are supported: -* ``cluster_id`` - (Required) ID of the cluster which host this object. -* ``comments`` - (Optional) Comments for this object. -* ``disk`` - (Optional) The size in GB of the disk for this object. -* ``local_context_data`` - (Optional) Local context data for this object. -* ``memory`` - (Optional) The size in MB of the memory of this object. -* ``name`` - (Required) The name for this object. -* ``platform_id`` - (Optional) ID of the platform for this object. -* ``role_id`` - (Optional) ID of the role for this object. -* ``status`` - (Optional) The status among offline, active, planned, staged, failed or decommissioning (active by default). -* ``tenant_id`` - (Optional) ID of the tenant where this object is attached. -* ``vcpus`` - (Optional) The number of VCPUS for this object. - -The ``custom_field`` block (optional) supports: -* ``name`` - (Required) Name of the existing custom resource to associate with this resource. 
-* ``type`` - (Required) Type of the existing custom resource to associate with this resource (text, integer, boolean, url, selection, multiple). -* ``value`` - (Required) Value of the existing custom resource to associate with this resource. - -The ``tag`` block (optional) supports: -* ``name`` - (Required) Name of the existing tag to associate with this resource. -* ``slug`` - (Required) Slug of the existing tag to associate with this resource. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* ``id`` - The id (ref in Netbox) of this object. -* ``content_type`` - The content type of this object. - -## Import - -Virtualization vms can be imported by `id` e.g. - -``` -$ terraform import netbox_virtualization_vm.vm_test id -``` diff --git a/examples/data-sources/netbox_dcim_platform/data-source.tf b/examples/data-sources/netbox_dcim_platform/data-source.tf new file mode 100644 index 000000000..d83590619 --- /dev/null +++ b/examples/data-sources/netbox_dcim_platform/data-source.tf @@ -0,0 +1,3 @@ +data "netbox_dcim_platform" "platform_test" { + slug = "TestPlatform" +} diff --git a/examples/data-sources/netbox_dcim_site/data-source.tf b/examples/data-sources/netbox_dcim_site/data-source.tf new file mode 100644 index 000000000..7b85ceebb --- /dev/null +++ b/examples/data-sources/netbox_dcim_site/data-source.tf @@ -0,0 +1,3 @@ +data "netbox_dcim_site" "site_test" { + slug = "TestSite" +} diff --git a/examples/data-sources/netbox_ipam_aggregate/data-source.tf b/examples/data-sources/netbox_ipam_aggregate/data-source.tf new file mode 100644 index 000000000..39b015870 --- /dev/null +++ b/examples/data-sources/netbox_ipam_aggregate/data-source.tf @@ -0,0 +1,4 @@ +data "netbox_ipam_aggregate" "aggregate_test" { + prefix = "192.168.56.0/24" + rir_id = 1 +} diff --git a/examples/data-sources/netbox_ipam_ip_addresses/data-source.tf b/examples/data-sources/netbox_ipam_ip_addresses/data-source.tf new file mode 100644 index 
000000000..35dbe3670 --- /dev/null +++ b/examples/data-sources/netbox_ipam_ip_addresses/data-source.tf @@ -0,0 +1,3 @@ +data "netbox_ipam_ip_addresses" "ipaddress_test" { + address = "192.168.56.1/24" +} diff --git a/examples/data-sources/netbox_ipam_role/data-source.tf b/examples/data-sources/netbox_ipam_role/data-source.tf new file mode 100644 index 000000000..416adf7c4 --- /dev/null +++ b/examples/data-sources/netbox_ipam_role/data-source.tf @@ -0,0 +1,3 @@ +data "netbox_ipam_role" "role_test" { + slug = "TestRole" +} diff --git a/examples/data-sources/netbox_ipam_service/data-source.tf b/examples/data-sources/netbox_ipam_service/data-source.tf new file mode 100644 index 000000000..ade999983 --- /dev/null +++ b/examples/data-sources/netbox_ipam_service/data-source.tf @@ -0,0 +1,6 @@ +data "netbox_ipam_service" "service_test" { + device_id = 5 + name = "Mail" + port = 25 + protocol = "tcp" +} diff --git a/examples/data-sources/netbox_ipam_vlan/data-source.tf b/examples/data-sources/netbox_ipam_vlan/data-source.tf new file mode 100644 index 000000000..9a2a28187 --- /dev/null +++ b/examples/data-sources/netbox_ipam_vlan/data-source.tf @@ -0,0 +1,4 @@ +data "netbox_ipam_vlan" "vlan_test" { + vlan_id = 15 + vlan_group_id = 16 +} diff --git a/examples/data-sources/netbox_ipam_vlan_group/data-source.tf b/examples/data-sources/netbox_ipam_vlan_group/data-source.tf new file mode 100644 index 000000000..11bc86e03 --- /dev/null +++ b/examples/data-sources/netbox_ipam_vlan_group/data-source.tf @@ -0,0 +1,3 @@ +data "netbox_ipam_vlan_group" "vlan_group_test" { + slug = "TestVlanGroup" +} diff --git a/examples/data-sources/netbox_json_circuits_circuit_terminations_list/data-source.tf b/examples/data-sources/netbox_json_circuits_circuit_terminations_list/data-source.tf new file mode 100644 index 000000000..53d41f468 --- /dev/null +++ b/examples/data-sources/netbox_json_circuits_circuit_terminations_list/data-source.tf @@ -0,0 +1,7 @@ +data 
"netbox_json_circuits_circuit_terminations_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_circuits_circuit_terminations_list.test.json) +} diff --git a/examples/data-sources/netbox_json_circuits_circuit_types_list/data-source.tf b/examples/data-sources/netbox_json_circuits_circuit_types_list/data-source.tf new file mode 100644 index 000000000..363fac742 --- /dev/null +++ b/examples/data-sources/netbox_json_circuits_circuit_types_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_circuits_circuit_types_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_circuits_circuit_types_list.test.json) +} diff --git a/examples/data-sources/netbox_json_circuits_circuits_list/data-source.tf b/examples/data-sources/netbox_json_circuits_circuits_list/data-source.tf new file mode 100644 index 000000000..5ddb059b5 --- /dev/null +++ b/examples/data-sources/netbox_json_circuits_circuits_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_circuits_circuits_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_circuits_circuits_list.test.json) +} diff --git a/examples/data-sources/netbox_json_circuits_provider_networks_list/data-source.tf b/examples/data-sources/netbox_json_circuits_provider_networks_list/data-source.tf new file mode 100644 index 000000000..66d96a5ea --- /dev/null +++ b/examples/data-sources/netbox_json_circuits_provider_networks_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_circuits_provider_networks_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_circuits_provider_networks_list.test.json) +} diff --git a/examples/data-sources/netbox_json_circuits_providers_list/data-source.tf b/examples/data-sources/netbox_json_circuits_providers_list/data-source.tf new file mode 100644 index 000000000..6a7189ab1 --- /dev/null +++ b/examples/data-sources/netbox_json_circuits_providers_list/data-source.tf @@ -0,0 
+1,7 @@ +data "netbox_json_circuits_providers_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_circuits_providers_list.test.json) +} diff --git a/examples/data-sources/netbox_json_dcim_cables_list/data-source.tf b/examples/data-sources/netbox_json_dcim_cables_list/data-source.tf new file mode 100644 index 000000000..195b96c7d --- /dev/null +++ b/examples/data-sources/netbox_json_dcim_cables_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_dcim_cables_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_dcim_cables_list.test.json) +} diff --git a/examples/data-sources/netbox_json_dcim_console_port_templates_list/data-source.tf b/examples/data-sources/netbox_json_dcim_console_port_templates_list/data-source.tf new file mode 100644 index 000000000..aabf71bdc --- /dev/null +++ b/examples/data-sources/netbox_json_dcim_console_port_templates_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_dcim_console_port_templates_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_dcim_console_port_templates_list.test.json) +} diff --git a/examples/data-sources/netbox_json_dcim_console_ports_list/data-source.tf b/examples/data-sources/netbox_json_dcim_console_ports_list/data-source.tf new file mode 100644 index 000000000..ba4cfa448 --- /dev/null +++ b/examples/data-sources/netbox_json_dcim_console_ports_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_dcim_console_ports_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_dcim_console_ports_list.test.json) +} diff --git a/examples/data-sources/netbox_json_dcim_console_server_port_templates_list/data-source.tf b/examples/data-sources/netbox_json_dcim_console_server_port_templates_list/data-source.tf new file mode 100644 index 000000000..60b62d247 --- /dev/null +++ b/examples/data-sources/netbox_json_dcim_console_server_port_templates_list/data-source.tf @@ -0,0 +1,7 
@@ +data "netbox_json_dcim_console_server_port_templates_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_dcim_console_server_port_templates_list.test.json) +} diff --git a/examples/data-sources/netbox_json_dcim_console_server_ports_list/data-source.tf b/examples/data-sources/netbox_json_dcim_console_server_ports_list/data-source.tf new file mode 100644 index 000000000..e52016bde --- /dev/null +++ b/examples/data-sources/netbox_json_dcim_console_server_ports_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_dcim_console_server_ports_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_dcim_console_server_ports_list.test.json) +} diff --git a/examples/data-sources/netbox_json_dcim_device_bay_templates_list/data-source.tf b/examples/data-sources/netbox_json_dcim_device_bay_templates_list/data-source.tf new file mode 100644 index 000000000..b4f83949b --- /dev/null +++ b/examples/data-sources/netbox_json_dcim_device_bay_templates_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_dcim_device_bay_templates_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_dcim_device_bay_templates_list.test.json) +} diff --git a/examples/data-sources/netbox_json_dcim_device_bays_list/data-source.tf b/examples/data-sources/netbox_json_dcim_device_bays_list/data-source.tf new file mode 100644 index 000000000..44f59fdbe --- /dev/null +++ b/examples/data-sources/netbox_json_dcim_device_bays_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_dcim_device_bays_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_dcim_device_bays_list.test.json) +} diff --git a/examples/data-sources/netbox_json_dcim_device_roles_list/data-source.tf b/examples/data-sources/netbox_json_dcim_device_roles_list/data-source.tf new file mode 100644 index 000000000..121265eec --- /dev/null +++ 
b/examples/data-sources/netbox_json_dcim_device_roles_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_dcim_device_roles_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_dcim_device_roles_list.test.json) +} diff --git a/examples/data-sources/netbox_json_dcim_device_types_list/data-source.tf b/examples/data-sources/netbox_json_dcim_device_types_list/data-source.tf new file mode 100644 index 000000000..6edbe8ede --- /dev/null +++ b/examples/data-sources/netbox_json_dcim_device_types_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_dcim_device_types_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_dcim_device_types_list.test.json) +} diff --git a/examples/data-sources/netbox_json_dcim_devices_list/data-source.tf b/examples/data-sources/netbox_json_dcim_devices_list/data-source.tf new file mode 100644 index 000000000..c524f6806 --- /dev/null +++ b/examples/data-sources/netbox_json_dcim_devices_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_dcim_devices_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_dcim_devices_list.test.json) +} diff --git a/examples/data-sources/netbox_json_dcim_front_port_templates_list/data-source.tf b/examples/data-sources/netbox_json_dcim_front_port_templates_list/data-source.tf new file mode 100644 index 000000000..60244ca40 --- /dev/null +++ b/examples/data-sources/netbox_json_dcim_front_port_templates_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_dcim_front_port_templates_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_dcim_front_port_templates_list.test.json) +} diff --git a/examples/data-sources/netbox_json_dcim_front_ports_list/data-source.tf b/examples/data-sources/netbox_json_dcim_front_ports_list/data-source.tf new file mode 100644 index 000000000..df7c74b0b --- /dev/null +++ 
b/examples/data-sources/netbox_json_dcim_front_ports_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_dcim_front_ports_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_dcim_front_ports_list.test.json) +} diff --git a/examples/data-sources/netbox_json_dcim_interface_templates_list/data-source.tf b/examples/data-sources/netbox_json_dcim_interface_templates_list/data-source.tf new file mode 100644 index 000000000..742b1627e --- /dev/null +++ b/examples/data-sources/netbox_json_dcim_interface_templates_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_dcim_interface_templates_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_dcim_interface_templates_list.test.json) +} diff --git a/examples/data-sources/netbox_json_dcim_interfaces_list/data-source.tf b/examples/data-sources/netbox_json_dcim_interfaces_list/data-source.tf new file mode 100644 index 000000000..4028c1af7 --- /dev/null +++ b/examples/data-sources/netbox_json_dcim_interfaces_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_dcim_interfaces_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_dcim_interfaces_list.test.json) +} diff --git a/examples/data-sources/netbox_json_dcim_inventory_items_list/data-source.tf b/examples/data-sources/netbox_json_dcim_inventory_items_list/data-source.tf new file mode 100644 index 000000000..3dbee8626 --- /dev/null +++ b/examples/data-sources/netbox_json_dcim_inventory_items_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_dcim_inventory_items_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_dcim_inventory_items_list.test.json) +} diff --git a/examples/data-sources/netbox_json_dcim_locations_list/data-source.tf b/examples/data-sources/netbox_json_dcim_locations_list/data-source.tf new file mode 100644 index 000000000..3b9b370ea --- /dev/null +++ 
b/examples/data-sources/netbox_json_dcim_locations_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_dcim_locations_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_dcim_locations_list.test.json) +} diff --git a/examples/data-sources/netbox_json_dcim_manufacturers_list/data-source.tf b/examples/data-sources/netbox_json_dcim_manufacturers_list/data-source.tf new file mode 100644 index 000000000..39cbae863 --- /dev/null +++ b/examples/data-sources/netbox_json_dcim_manufacturers_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_dcim_manufacturers_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_dcim_manufacturers_list.test.json) +} diff --git a/examples/data-sources/netbox_json_dcim_platforms_list/data-source.tf b/examples/data-sources/netbox_json_dcim_platforms_list/data-source.tf new file mode 100644 index 000000000..8922108e5 --- /dev/null +++ b/examples/data-sources/netbox_json_dcim_platforms_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_dcim_platforms_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_dcim_platforms_list.test.json) +} diff --git a/examples/data-sources/netbox_json_dcim_power_feeds_list/data-source.tf b/examples/data-sources/netbox_json_dcim_power_feeds_list/data-source.tf new file mode 100644 index 000000000..d71401897 --- /dev/null +++ b/examples/data-sources/netbox_json_dcim_power_feeds_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_dcim_power_feeds_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_dcim_power_feeds_list.test.json) +} diff --git a/examples/data-sources/netbox_json_dcim_power_outlet_templates_list/data-source.tf b/examples/data-sources/netbox_json_dcim_power_outlet_templates_list/data-source.tf new file mode 100644 index 000000000..e022fdc73 --- /dev/null +++ b/examples/data-sources/netbox_json_dcim_power_outlet_templates_list/data-source.tf @@ 
-0,0 +1,7 @@ +data "netbox_json_dcim_power_outlet_templates_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_dcim_power_outlet_templates_list.test.json) +} diff --git a/examples/data-sources/netbox_json_dcim_power_outlets_list/data-source.tf b/examples/data-sources/netbox_json_dcim_power_outlets_list/data-source.tf new file mode 100644 index 000000000..b1278ba1d --- /dev/null +++ b/examples/data-sources/netbox_json_dcim_power_outlets_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_dcim_power_outlets_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_dcim_power_outlets_list.test.json) +} diff --git a/examples/data-sources/netbox_json_dcim_power_panels_list/data-source.tf b/examples/data-sources/netbox_json_dcim_power_panels_list/data-source.tf new file mode 100644 index 000000000..53d250416 --- /dev/null +++ b/examples/data-sources/netbox_json_dcim_power_panels_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_dcim_power_panels_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_dcim_power_panels_list.test.json) +} diff --git a/examples/data-sources/netbox_json_dcim_power_port_templates_list/data-source.tf b/examples/data-sources/netbox_json_dcim_power_port_templates_list/data-source.tf new file mode 100644 index 000000000..7c6afa798 --- /dev/null +++ b/examples/data-sources/netbox_json_dcim_power_port_templates_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_dcim_power_port_templates_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_dcim_power_port_templates_list.test.json) +} diff --git a/examples/data-sources/netbox_json_dcim_power_ports_list/data-source.tf b/examples/data-sources/netbox_json_dcim_power_ports_list/data-source.tf new file mode 100644 index 000000000..dd7c3cada --- /dev/null +++ b/examples/data-sources/netbox_json_dcim_power_ports_list/data-source.tf @@ -0,0 +1,7 @@ +data 
"netbox_json_dcim_power_ports_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_dcim_power_ports_list.test.json) +} diff --git a/examples/data-sources/netbox_json_dcim_rack_reservations_list/data-source.tf b/examples/data-sources/netbox_json_dcim_rack_reservations_list/data-source.tf new file mode 100644 index 000000000..ed6148d77 --- /dev/null +++ b/examples/data-sources/netbox_json_dcim_rack_reservations_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_dcim_rack_reservations_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_dcim_rack_reservations_list.test.json) +} diff --git a/examples/data-sources/netbox_json_dcim_rack_roles_list/data-source.tf b/examples/data-sources/netbox_json_dcim_rack_roles_list/data-source.tf new file mode 100644 index 000000000..cb877bb72 --- /dev/null +++ b/examples/data-sources/netbox_json_dcim_rack_roles_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_dcim_rack_roles_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_dcim_rack_roles_list.test.json) +} diff --git a/examples/data-sources/netbox_json_dcim_racks_list/data-source.tf b/examples/data-sources/netbox_json_dcim_racks_list/data-source.tf new file mode 100644 index 000000000..d3daffcc5 --- /dev/null +++ b/examples/data-sources/netbox_json_dcim_racks_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_dcim_racks_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_dcim_racks_list.test.json) +} diff --git a/examples/data-sources/netbox_json_dcim_rear_port_templates_list/data-source.tf b/examples/data-sources/netbox_json_dcim_rear_port_templates_list/data-source.tf new file mode 100644 index 000000000..4aef52b79 --- /dev/null +++ b/examples/data-sources/netbox_json_dcim_rear_port_templates_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_dcim_rear_port_templates_list" "test" { + limit = 0 +} + +output 
"example" { + value = jsondecode(data.netbox_json_dcim_rear_port_templates_list.test.json) +} diff --git a/examples/data-sources/netbox_json_dcim_rear_ports_list/data-source.tf b/examples/data-sources/netbox_json_dcim_rear_ports_list/data-source.tf new file mode 100644 index 000000000..e405c2573 --- /dev/null +++ b/examples/data-sources/netbox_json_dcim_rear_ports_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_dcim_rear_ports_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_dcim_rear_ports_list.test.json) +} diff --git a/examples/data-sources/netbox_json_dcim_regions_list/data-source.tf b/examples/data-sources/netbox_json_dcim_regions_list/data-source.tf new file mode 100644 index 000000000..4ed692a80 --- /dev/null +++ b/examples/data-sources/netbox_json_dcim_regions_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_dcim_regions_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_dcim_regions_list.test.json) +} diff --git a/examples/data-sources/netbox_json_dcim_site_groups_list/data-source.tf b/examples/data-sources/netbox_json_dcim_site_groups_list/data-source.tf new file mode 100644 index 000000000..19bc67019 --- /dev/null +++ b/examples/data-sources/netbox_json_dcim_site_groups_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_dcim_site_groups_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_dcim_site_groups_list.test.json) +} diff --git a/examples/data-sources/netbox_json_dcim_sites_list/data-source.tf b/examples/data-sources/netbox_json_dcim_sites_list/data-source.tf new file mode 100644 index 000000000..d47f26f9b --- /dev/null +++ b/examples/data-sources/netbox_json_dcim_sites_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_dcim_sites_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_dcim_sites_list.test.json) +} diff --git 
a/examples/data-sources/netbox_json_dcim_virtual_chassis_list/data-source.tf b/examples/data-sources/netbox_json_dcim_virtual_chassis_list/data-source.tf new file mode 100644 index 000000000..38acdaa19 --- /dev/null +++ b/examples/data-sources/netbox_json_dcim_virtual_chassis_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_dcim_virtual_chassis_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_dcim_virtual_chassis_list.test.json) +} diff --git a/examples/data-sources/netbox_json_extras_config_contexts_list/data-source.tf b/examples/data-sources/netbox_json_extras_config_contexts_list/data-source.tf new file mode 100644 index 000000000..7b4d7370d --- /dev/null +++ b/examples/data-sources/netbox_json_extras_config_contexts_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_extras_config_contexts_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_extras_config_contexts_list.test.json) +} diff --git a/examples/data-sources/netbox_json_extras_content_types_list/data-source.tf b/examples/data-sources/netbox_json_extras_content_types_list/data-source.tf new file mode 100644 index 000000000..a02fdd280 --- /dev/null +++ b/examples/data-sources/netbox_json_extras_content_types_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_extras_content_types_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_extras_content_types_list.test.json) +} diff --git a/examples/data-sources/netbox_json_extras_custom_fields_list/data-source.tf b/examples/data-sources/netbox_json_extras_custom_fields_list/data-source.tf new file mode 100644 index 000000000..05b02b498 --- /dev/null +++ b/examples/data-sources/netbox_json_extras_custom_fields_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_extras_custom_fields_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_extras_custom_fields_list.test.json) +} diff --git 
a/examples/data-sources/netbox_json_extras_custom_links_list/data-source.tf b/examples/data-sources/netbox_json_extras_custom_links_list/data-source.tf new file mode 100644 index 000000000..ffe5468ac --- /dev/null +++ b/examples/data-sources/netbox_json_extras_custom_links_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_extras_custom_links_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_extras_custom_links_list.test.json) +} diff --git a/examples/data-sources/netbox_json_extras_export_templates_list/data-source.tf b/examples/data-sources/netbox_json_extras_export_templates_list/data-source.tf new file mode 100644 index 000000000..b6a883fe0 --- /dev/null +++ b/examples/data-sources/netbox_json_extras_export_templates_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_extras_export_templates_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_extras_export_templates_list.test.json) +} diff --git a/examples/data-sources/netbox_json_extras_image_attachments_list/data-source.tf b/examples/data-sources/netbox_json_extras_image_attachments_list/data-source.tf new file mode 100644 index 000000000..ebb78acc1 --- /dev/null +++ b/examples/data-sources/netbox_json_extras_image_attachments_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_extras_image_attachments_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_extras_image_attachments_list.test.json) +} diff --git a/examples/data-sources/netbox_json_extras_job_results_list/data-source.tf b/examples/data-sources/netbox_json_extras_job_results_list/data-source.tf new file mode 100644 index 000000000..d65701ca4 --- /dev/null +++ b/examples/data-sources/netbox_json_extras_job_results_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_extras_job_results_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_extras_job_results_list.test.json) +} diff --git 
a/examples/data-sources/netbox_json_extras_journal_entries_list/data-source.tf b/examples/data-sources/netbox_json_extras_journal_entries_list/data-source.tf new file mode 100644 index 000000000..1b0aac278 --- /dev/null +++ b/examples/data-sources/netbox_json_extras_journal_entries_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_extras_journal_entries_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_extras_journal_entries_list.test.json) +} diff --git a/examples/data-sources/netbox_json_extras_object_changes_list/data-source.tf b/examples/data-sources/netbox_json_extras_object_changes_list/data-source.tf new file mode 100644 index 000000000..c82ada1c9 --- /dev/null +++ b/examples/data-sources/netbox_json_extras_object_changes_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_extras_object_changes_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_extras_object_changes_list.test.json) +} diff --git a/examples/data-sources/netbox_json_extras_tags_list/data-source.tf b/examples/data-sources/netbox_json_extras_tags_list/data-source.tf new file mode 100644 index 000000000..07d9c58b7 --- /dev/null +++ b/examples/data-sources/netbox_json_extras_tags_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_extras_tags_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_extras_tags_list.test.json) +} diff --git a/examples/data-sources/netbox_json_extras_webhooks_list/data-source.tf b/examples/data-sources/netbox_json_extras_webhooks_list/data-source.tf new file mode 100644 index 000000000..479f4c9dc --- /dev/null +++ b/examples/data-sources/netbox_json_extras_webhooks_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_extras_webhooks_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_extras_webhooks_list.test.json) +} diff --git a/examples/data-sources/netbox_json_ipam_aggregates_list/data-source.tf 
b/examples/data-sources/netbox_json_ipam_aggregates_list/data-source.tf new file mode 100644 index 000000000..96a7985aa --- /dev/null +++ b/examples/data-sources/netbox_json_ipam_aggregates_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_ipam_aggregates_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_ipam_aggregates_list.test.json) +} diff --git a/examples/data-sources/netbox_json_ipam_asns_list/data-source.tf b/examples/data-sources/netbox_json_ipam_asns_list/data-source.tf new file mode 100644 index 000000000..f9ceed82d --- /dev/null +++ b/examples/data-sources/netbox_json_ipam_asns_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_ipam_asns_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_ipam_asns_list.test.json) +} diff --git a/examples/data-sources/netbox_json_ipam_fhrp_group_assignments_list/data-source.tf b/examples/data-sources/netbox_json_ipam_fhrp_group_assignments_list/data-source.tf new file mode 100644 index 000000000..b909387f1 --- /dev/null +++ b/examples/data-sources/netbox_json_ipam_fhrp_group_assignments_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_ipam_fhrp_group_assignments_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_ipam_fhrp_group_assignments_list.test.json) +} diff --git a/examples/data-sources/netbox_json_ipam_fhrp_groups_list/data-source.tf b/examples/data-sources/netbox_json_ipam_fhrp_groups_list/data-source.tf new file mode 100644 index 000000000..ccd738f34 --- /dev/null +++ b/examples/data-sources/netbox_json_ipam_fhrp_groups_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_ipam_fhrp_groups_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_ipam_fhrp_groups_list.test.json) +} diff --git a/examples/data-sources/netbox_json_ipam_ip_addresses_list/data-source.tf b/examples/data-sources/netbox_json_ipam_ip_addresses_list/data-source.tf new file 
mode 100644 index 000000000..274d7584a --- /dev/null +++ b/examples/data-sources/netbox_json_ipam_ip_addresses_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_ipam_ip_addresses_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_ipam_ip_addresses_list.test.json) +} diff --git a/examples/data-sources/netbox_json_ipam_ip_ranges_list/data-source.tf b/examples/data-sources/netbox_json_ipam_ip_ranges_list/data-source.tf new file mode 100644 index 000000000..1c70b3d88 --- /dev/null +++ b/examples/data-sources/netbox_json_ipam_ip_ranges_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_ipam_ip_ranges_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_ipam_ip_ranges_list.test.json) +} diff --git a/examples/data-sources/netbox_json_ipam_prefixes_list/data-source.tf b/examples/data-sources/netbox_json_ipam_prefixes_list/data-source.tf new file mode 100644 index 000000000..621bfb796 --- /dev/null +++ b/examples/data-sources/netbox_json_ipam_prefixes_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_ipam_prefixes_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_ipam_prefixes_list.test.json) +} diff --git a/examples/data-sources/netbox_json_ipam_rirs_list/data-source.tf b/examples/data-sources/netbox_json_ipam_rirs_list/data-source.tf new file mode 100644 index 000000000..e1a020ec4 --- /dev/null +++ b/examples/data-sources/netbox_json_ipam_rirs_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_ipam_rirs_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_ipam_rirs_list.test.json) +} diff --git a/examples/data-sources/netbox_json_ipam_roles_list/data-source.tf b/examples/data-sources/netbox_json_ipam_roles_list/data-source.tf new file mode 100644 index 000000000..4acd0ea92 --- /dev/null +++ b/examples/data-sources/netbox_json_ipam_roles_list/data-source.tf @@ -0,0 +1,7 @@ +data 
"netbox_json_ipam_roles_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_ipam_roles_list.test.json) +} diff --git a/examples/data-sources/netbox_json_ipam_route_targets_list/data-source.tf b/examples/data-sources/netbox_json_ipam_route_targets_list/data-source.tf new file mode 100644 index 000000000..1bdbc6727 --- /dev/null +++ b/examples/data-sources/netbox_json_ipam_route_targets_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_ipam_route_targets_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_ipam_route_targets_list.test.json) +} diff --git a/examples/data-sources/netbox_json_ipam_services_list/data-source.tf b/examples/data-sources/netbox_json_ipam_services_list/data-source.tf new file mode 100644 index 000000000..e3da3eb69 --- /dev/null +++ b/examples/data-sources/netbox_json_ipam_services_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_ipam_services_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_ipam_services_list.test.json) +} diff --git a/examples/data-sources/netbox_json_ipam_vlan_groups_list/data-source.tf b/examples/data-sources/netbox_json_ipam_vlan_groups_list/data-source.tf new file mode 100644 index 000000000..2a0195d3c --- /dev/null +++ b/examples/data-sources/netbox_json_ipam_vlan_groups_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_ipam_vlan_groups_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_ipam_vlan_groups_list.test.json) +} diff --git a/examples/data-sources/netbox_json_ipam_vlans_list/data-source.tf b/examples/data-sources/netbox_json_ipam_vlans_list/data-source.tf new file mode 100644 index 000000000..8c8637cee --- /dev/null +++ b/examples/data-sources/netbox_json_ipam_vlans_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_ipam_vlans_list" "test" { + limit = 0 +} + +output "example" { + value = 
jsondecode(data.netbox_json_ipam_vlans_list.test.json) +} diff --git a/examples/data-sources/netbox_json_ipam_vrfs_list/data-source.tf b/examples/data-sources/netbox_json_ipam_vrfs_list/data-source.tf new file mode 100644 index 000000000..0bee8a564 --- /dev/null +++ b/examples/data-sources/netbox_json_ipam_vrfs_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_ipam_vrfs_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_ipam_vrfs_list.test.json) +} diff --git a/examples/data-sources/netbox_json_tenancy_contact_assignments_list/data-source.tf b/examples/data-sources/netbox_json_tenancy_contact_assignments_list/data-source.tf new file mode 100644 index 000000000..6cf97c31f --- /dev/null +++ b/examples/data-sources/netbox_json_tenancy_contact_assignments_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_tenancy_contact_assignments_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_tenancy_contact_assignments_list.test.json) +} diff --git a/examples/data-sources/netbox_json_tenancy_contact_groups_list/data-source.tf b/examples/data-sources/netbox_json_tenancy_contact_groups_list/data-source.tf new file mode 100644 index 000000000..c353548fe --- /dev/null +++ b/examples/data-sources/netbox_json_tenancy_contact_groups_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_tenancy_contact_groups_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_tenancy_contact_groups_list.test.json) +} diff --git a/examples/data-sources/netbox_json_tenancy_contact_roles_list/data-source.tf b/examples/data-sources/netbox_json_tenancy_contact_roles_list/data-source.tf new file mode 100644 index 000000000..7fa92aa8b --- /dev/null +++ b/examples/data-sources/netbox_json_tenancy_contact_roles_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_tenancy_contact_roles_list" "test" { + limit = 0 +} + +output "example" { + value = 
jsondecode(data.netbox_json_tenancy_contact_roles_list.test.json) +} diff --git a/examples/data-sources/netbox_json_tenancy_contacts_list/data-source.tf b/examples/data-sources/netbox_json_tenancy_contacts_list/data-source.tf new file mode 100644 index 000000000..a0d515e67 --- /dev/null +++ b/examples/data-sources/netbox_json_tenancy_contacts_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_tenancy_contacts_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_tenancy_contacts_list.test.json) +} diff --git a/examples/data-sources/netbox_json_tenancy_tenant_groups_list/data-source.tf b/examples/data-sources/netbox_json_tenancy_tenant_groups_list/data-source.tf new file mode 100644 index 000000000..1d3078229 --- /dev/null +++ b/examples/data-sources/netbox_json_tenancy_tenant_groups_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_tenancy_tenant_groups_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_tenancy_tenant_groups_list.test.json) +} diff --git a/examples/data-sources/netbox_json_tenancy_tenants_list/data-source.tf b/examples/data-sources/netbox_json_tenancy_tenants_list/data-source.tf new file mode 100644 index 000000000..1fbf141cb --- /dev/null +++ b/examples/data-sources/netbox_json_tenancy_tenants_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_tenancy_tenants_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_tenancy_tenants_list.test.json) +} diff --git a/examples/data-sources/netbox_json_users_groups_list/data-source.tf b/examples/data-sources/netbox_json_users_groups_list/data-source.tf new file mode 100644 index 000000000..b0a597a40 --- /dev/null +++ b/examples/data-sources/netbox_json_users_groups_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_users_groups_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_users_groups_list.test.json) +} diff --git 
a/examples/data-sources/netbox_json_users_permissions_list/data-source.tf b/examples/data-sources/netbox_json_users_permissions_list/data-source.tf new file mode 100644 index 000000000..52a3d6d45 --- /dev/null +++ b/examples/data-sources/netbox_json_users_permissions_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_users_permissions_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_users_permissions_list.test.json) +} diff --git a/examples/data-sources/netbox_json_users_tokens_list/data-source.tf b/examples/data-sources/netbox_json_users_tokens_list/data-source.tf new file mode 100644 index 000000000..2965e0abd --- /dev/null +++ b/examples/data-sources/netbox_json_users_tokens_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_users_tokens_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_users_tokens_list.test.json) +} diff --git a/examples/data-sources/netbox_json_users_users_list/data-source.tf b/examples/data-sources/netbox_json_users_users_list/data-source.tf new file mode 100644 index 000000000..0ee5dd357 --- /dev/null +++ b/examples/data-sources/netbox_json_users_users_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_users_users_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_users_users_list.test.json) +} diff --git a/examples/data-sources/netbox_json_virtualization_cluster_groups_list/data-source.tf b/examples/data-sources/netbox_json_virtualization_cluster_groups_list/data-source.tf new file mode 100644 index 000000000..53cab4ca1 --- /dev/null +++ b/examples/data-sources/netbox_json_virtualization_cluster_groups_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_virtualization_cluster_groups_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_virtualization_cluster_groups_list.test.json) +} diff --git 
a/examples/data-sources/netbox_json_virtualization_cluster_types_list/data-source.tf b/examples/data-sources/netbox_json_virtualization_cluster_types_list/data-source.tf new file mode 100644 index 000000000..5a078f5b1 --- /dev/null +++ b/examples/data-sources/netbox_json_virtualization_cluster_types_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_virtualization_cluster_types_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_virtualization_cluster_types_list.test.json) +} diff --git a/examples/data-sources/netbox_json_virtualization_clusters_list/data-source.tf b/examples/data-sources/netbox_json_virtualization_clusters_list/data-source.tf new file mode 100644 index 000000000..17999087b --- /dev/null +++ b/examples/data-sources/netbox_json_virtualization_clusters_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_virtualization_clusters_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_virtualization_clusters_list.test.json) +} diff --git a/examples/data-sources/netbox_json_virtualization_interfaces_list/data-source.tf b/examples/data-sources/netbox_json_virtualization_interfaces_list/data-source.tf new file mode 100644 index 000000000..05e6cefac --- /dev/null +++ b/examples/data-sources/netbox_json_virtualization_interfaces_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_virtualization_interfaces_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_virtualization_interfaces_list.test.json) +} diff --git a/examples/data-sources/netbox_json_virtualization_virtual_machines_list/data-source.tf b/examples/data-sources/netbox_json_virtualization_virtual_machines_list/data-source.tf new file mode 100644 index 000000000..cb872c4fa --- /dev/null +++ b/examples/data-sources/netbox_json_virtualization_virtual_machines_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_virtualization_virtual_machines_list" "test" { + limit = 0 +} + +output 
"example" { + value = jsondecode(data.netbox_json_virtualization_virtual_machines_list.test.json) +} diff --git a/examples/data-sources/netbox_json_wireless_wireless_lan_groups_list/data-source.tf b/examples/data-sources/netbox_json_wireless_wireless_lan_groups_list/data-source.tf new file mode 100644 index 000000000..8f0cd933e --- /dev/null +++ b/examples/data-sources/netbox_json_wireless_wireless_lan_groups_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_wireless_wireless_lan_groups_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_wireless_wireless_lan_groups_list.test.json) +} diff --git a/examples/data-sources/netbox_json_wireless_wireless_lans_list/data-source.tf b/examples/data-sources/netbox_json_wireless_wireless_lans_list/data-source.tf new file mode 100644 index 000000000..5dc092720 --- /dev/null +++ b/examples/data-sources/netbox_json_wireless_wireless_lans_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_wireless_wireless_lans_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_wireless_wireless_lans_list.test.json) +} diff --git a/examples/data-sources/netbox_json_wireless_wireless_links_list/data-source.tf b/examples/data-sources/netbox_json_wireless_wireless_links_list/data-source.tf new file mode 100644 index 000000000..b8c310867 --- /dev/null +++ b/examples/data-sources/netbox_json_wireless_wireless_links_list/data-source.tf @@ -0,0 +1,7 @@ +data "netbox_json_wireless_wireless_links_list" "test" { + limit = 0 +} + +output "example" { + value = jsondecode(data.netbox_json_wireless_wireless_links_list.test.json) +} diff --git a/examples/provider/provider.tf b/examples/provider/provider.tf new file mode 100644 index 000000000..b9c350dec --- /dev/null +++ b/examples/provider/provider.tf @@ -0,0 +1,26 @@ +terraform { + required_version = ">= 0.14.0" + required_providers { + netbox = { + source = "smutel/netbox" + version = "~> 4.0.0" + } + } +} + +provider netbox 
{ + # Environment variable NETBOX_URL + url = "127.0.0.1:8000" + + # Environment variable NETBOX_BASEPATH + basepath = "/api" + + # Environment variable NETBOX_TOKEN + token = "0123456789abcdef0123456789abcdef01234567" + + # Environment variable NETBOX_SCHEME + scheme = "http" + + # Environment variable NETBOX_INSECURE + insecure = "true" +} diff --git a/examples/resources/netbox_ipam_aggregate/resource.tf b/examples/resources/netbox_ipam_aggregate/resource.tf new file mode 100644 index 000000000..2363ddee1 --- /dev/null +++ b/examples/resources/netbox_ipam_aggregate/resource.tf @@ -0,0 +1,52 @@ +resource "netbox_ipam_aggregate" "aggregate_test" { + prefix = "192.168.56.0/24" + rir_id = 1 + date_created = "2020-12-21" + + tag { + name = "tag1" + slug = "tag1" + } + + custom_field { + name = "cf_boolean" + type = "boolean" + value = "true" + } + + custom_field { + name = "cf_date" + type = "date" + value = "2020-12-25" + } + + custom_field { + name = "cf_text" + type = "text" + value = "some text" + } + + custom_field { + name = "cf_integer" + type = "integer" + value = "10" + } + + custom_field { + name = "cf_selection" + type = "selection" + value = "1" + } + + custom_field { + name = "cf_url" + type = "url" + value = "https://github.com" + } + + custom_field { + name = "cf_multiple_selection" + type = "multiple" + value = "0,1" + } +} diff --git a/examples/resources/netbox_ipam_ip_addresses/resource.tf b/examples/resources/netbox_ipam_ip_addresses/resource.tf new file mode 100644 index 000000000..430447ba9 --- /dev/null +++ b/examples/resources/netbox_ipam_ip_addresses/resource.tf @@ -0,0 +1,51 @@ +resource "netbox_ipam_ip_addresses" "ip_test" { + address = "192.168.56.0/24" + description = "IP created by terraform" + status = "active" + + tag { + name = "tag1" + slug = "tag1" + } + + custom_field { + name = "cf_boolean" + type = "boolean" + value = "true" + } + + custom_field { + name = "cf_date" + type = "date" + value = "2020-12-25" + } + + custom_field { + 
name = "cf_text" + type = "text" + value = "some text" + } + + custom_field { + name = "cf_integer" + type = "integer" + value = "10" + } + + custom_field { + name = "cf_selection" + type = "selection" + value = "1" + } + + custom_field { + name = "cf_url" + type = "url" + value = "https://github.com" + } + + custom_field { + name = "cf_multiple_selection" + type = "multiple" + value = "0,1" + } diff --git a/examples/resources/netbox_ipam_prefix/resource.tf b/examples/resources/netbox_ipam_prefix/resource.tf new file mode 100644 index 000000000..78f34db79 --- /dev/null +++ b/examples/resources/netbox_ipam_prefix/resource.tf @@ -0,0 +1,55 @@ +resource "netbox_ipam_prefix" "prefix_test" { + prefix = "192.168.56.0/24" + vlan_id = netbox_ipam_vlan.vlan_test.id + description = "Prefix created by terraform" + site_id = netbox_ipam_vlan_group.vlan_group_test.site_id + role_id = data.netbox_ipam_roles.vlan_role_production.id + status = "active" + + tag { + name = "tag1" + slug = "tag1" + } + + custom_field { + name = "cf_boolean" + type = "boolean" + value = "true" + } + + custom_field { + name = "cf_date" + type = "date" + value = "2020-12-25" + } + + custom_field { + name = "cf_text" + type = "text" + value = "some text" + } + + custom_field { + name = "cf_integer" + type = "integer" + value = "10" + } + + custom_field { + name = "cf_selection" + type = "selection" + value = "1" + } + + custom_field { + name = "cf_url" + type = "url" + value = "https://github.com" + } + + custom_field { + name = "cf_multiple_selection" + type = "multiple" + value = "0,1" + } +} diff --git a/examples/resources/netbox_ipam_service/resource.tf b/examples/resources/netbox_ipam_service/resource.tf new file mode 100644 index 000000000..2dd882740 --- /dev/null +++ b/examples/resources/netbox_ipam_service/resource.tf @@ -0,0 +1,55 @@ +resource "netbox_ipam_service" "service_test" { + name = "SMTP" + virtualmachine_id = netbox_virtualization_vm.vm_test.id + ip_addresses_id = 
[netbox_ipam_ip_addresses.ip_test.id] + ports = ["22"] + protocol = "tcp" + description = "Service created by terraform" + + tag { + name = "tag1" + slug = "tag1" + } + + custom_field { + name = "cf_boolean" + type = "boolean" + value = "true" + } + + custom_field { + name = "cf_date" + type = "date" + value = "2020-12-25" + } + + custom_field { + name = "cf_text" + type = "text" + value = "some text" + } + + custom_field { + name = "cf_integer" + type = "integer" + value = "10" + } + + custom_field { + name = "cf_selection" + type = "selection" + value = "1" + } + + custom_field { + name = "cf_url" + type = "url" + value = "https://github.com" + } + + custom_field { + name = "cf_multiple_selection" + type = "multiple" + value = "0,1" + } +} diff --git a/examples/resources/netbox_ipam_vlan/resource.tf b/examples/resources/netbox_ipam_vlan/resource.tf new file mode 100644 index 000000000..1a4d61794 --- /dev/null +++ b/examples/resources/netbox_ipam_vlan/resource.tf @@ -0,0 +1,56 @@ +resource "netbox_ipam_vlan" "vlan_test" { + vlan_id = 100 + name = "TestVlan" + site_id = netbox_ipam_vlan_group.vlan_group_test.site_id + description = "VLAN created by terraform" + vlan_group_id = netbox_ipam_vlan_group.vlan_group_test.id + tenant_id = netbox_tenancy_tenant.tenant_test.id + role_id = data.netbox_ipam_roles.vlan_role_production.id + + tag { + name = "tag1" + slug = "tag1" + } + + custom_field { + name = "cf_boolean" + type = "boolean" + value = "true" + } + + custom_field { + name = "cf_date" + type = "date" + value = "2020-12-25" + } + + custom_field { + name = "cf_text" + type = "text" + value = "some text" + } + + custom_field { + name = "cf_integer" + type = "integer" + value = "10" + } + + custom_field { + name = "cf_selection" + type = "selection" + value = "1" + } + + custom_field { + name = "cf_url" + type = "url" + value = "https://github.com" + } + + custom_field { + name = "cf_multiple_selection" + type = "multiple" + value = "0,1" + } +} diff --git 
a/examples/resources/netbox_ipam_vlan_group/resource.tf b/examples/resources/netbox_ipam_vlan_group/resource.tf new file mode 100644 index 000000000..ffec0b9ed --- /dev/null +++ b/examples/resources/netbox_ipam_vlan_group/resource.tf @@ -0,0 +1,9 @@ +resource "netbox_ipam_vlan_group" "vlan_group_test" { + name = "TestVlanGroup" + slug = "TestVlanGroup" + + tag { + name = "tag1" + slug = "tag1" + } +} diff --git a/examples/resources/netbox_tenancy_contact/resource.tf b/examples/resources/netbox_tenancy_contact/resource.tf new file mode 100644 index 000000000..b775e318a --- /dev/null +++ b/examples/resources/netbox_tenancy_contact/resource.tf @@ -0,0 +1,56 @@ +resource "netbox_tenancy_contact" "contact_test" { + name = "John Doe" + title = "Someone in the world" + phone = "+330123456789" + email = "john.doe@unknown.com" + address = "Somewhere in the world" + comments = "Good contact" + contact_group_id = netbox_tenancy_contact_group.contact_group_02.id + + tag { + name = "tag1" + slug = "tag1" + } + + custom_field { + name = "cf_boolean" + type = "boolean" + value = "true" + } + + custom_field { + name = "cf_date" + type = "date" + value = "2020-12-25" + } + + custom_field { + name = "cf_text" + type = "text" + value = "some text" + } + + custom_field { + name = "cf_integer" + type = "integer" + value = "10" + } + + custom_field { + name = "cf_selection" + type = "selection" + value = "1" + } + + custom_field { + name = "cf_url" + type = "url" + value = "https://github.com" + } + + custom_field { + name = "cf_multiple_selection" + type = "multiple" + value = "0,1" + } +} diff --git a/examples/resources/netbox_tenancy_contact_assignment/resource.tf b/examples/resources/netbox_tenancy_contact_assignment/resource.tf new file mode 100644 index 000000000..27d838877 --- /dev/null +++ b/examples/resources/netbox_tenancy_contact_assignment/resource.tf @@ -0,0 +1,7 @@ +resource "netbox_tenancy_contact_assignment" "contact_assignment_01" { + contact_id = 
netbox_tenancy_contact.contact.id + contact_role_id = netbox_tenancy_contact_role.contact_role_02.id + content_type = netbox_virtualization_vm.vm_test.content_type + object_id = netbox_virtualization_vm.vm_test.id + priority = "primary" +} diff --git a/examples/resources/netbox_tenancy_contact_group/resource.tf b/examples/resources/netbox_tenancy_contact_group/resource.tf new file mode 100644 index 000000000..9762168a4 --- /dev/null +++ b/examples/resources/netbox_tenancy_contact_group/resource.tf @@ -0,0 +1,53 @@ +resource "netbox_tenancy_contact_group" "contact_group_test" { + description = "Contact group created by terraform" + name = "TestContactGroup" + parent_id = 10 + slug = "TestContactGroup" + + tag { + name = "tag1" + slug = "tag1" + } + + custom_field { + name = "cf_boolean" + type = "boolean" + value = "true" + } + + custom_field { + name = "cf_date" + type = "date" + value = "2020-12-25" + } + + custom_field { + name = "cf_text" + type = "text" + value = "some text" + } + + custom_field { + name = "cf_integer" + type = "integer" + value = "10" + } + + custom_field { + name = "cf_selection" + type = "selection" + value = "1" + } + + custom_field { + name = "cf_url" + type = "url" + value = "https://github.com" + } + + custom_field { + name = "cf_multiple_selection" + type = "multiple" + value = "0,1" + } +} diff --git a/examples/resources/netbox_tenancy_contact_role/resource.tf b/examples/resources/netbox_tenancy_contact_role/resource.tf new file mode 100644 index 000000000..b749c435a --- /dev/null +++ b/examples/resources/netbox_tenancy_contact_role/resource.tf @@ -0,0 +1,52 @@ +resource "netbox_tenancy_contact_role" "contact_role_test" { + description = "Contact role created by terraform" + name = "TestContactRole" + slug = "TestContactRole" + + tag { + name = "tag1" + slug = "tag1" + } + + custom_field { + name = "cf_boolean" + type = "boolean" + value = "true" + } + + custom_field { + name = "cf_date" + type = "date" + value = "2020-12-25" + } + + 
custom_field { + name = "cf_text" + type = "text" + value = "some text" + } + + custom_field { + name = "cf_integer" + type = "integer" + value = "10" + } + + custom_field { + name = "cf_selection" + type = "selection" + value = "1" + } + + custom_field { + name = "cf_url" + type = "url" + value = "https://github.com" + } + + custom_field { + name = "cf_multiple_selection" + type = "multiple" + value = "0,1" + } +} diff --git a/examples/resources/netbox_tenancy_tenant/resource.tf b/examples/resources/netbox_tenancy_tenant/resource.tf new file mode 100644 index 000000000..5571c3b47 --- /dev/null +++ b/examples/resources/netbox_tenancy_tenant/resource.tf @@ -0,0 +1,54 @@ +resource "netbox_tenancy_tenant" "tenant_test" { + name = "TestTenant" + slug = "TestTenant" + description = "Tenant created by terraform" + comments = "Some test comments" + tenant_group_id = netbox_tenancy_tenant_group.tenant_group_test.id + + tag { + name = "tag1" + slug = "tag1" + } + + custom_field { + name = "cf_boolean" + type = "boolean" + value = "true" + } + + custom_field { + name = "cf_date" + type = "date" + value = "2020-12-25" + } + + custom_field { + name = "cf_text" + type = "text" + value = "some text" + } + + custom_field { + name = "cf_integer" + type = "integer" + value = "10" + } + + custom_field { + name = "cf_selection" + type = "selection" + value = "1" + } + + custom_field { + name = "cf_url" + type = "url" + value = "https://github.com" + } + + custom_field { + name = "cf_multiple_selection" + type = "multiple" + value = "0,1" + } +} diff --git a/examples/resources/netbox_tenancy_tenant_group/resource.tf b/examples/resources/netbox_tenancy_tenant_group/resource.tf new file mode 100644 index 000000000..c300bb71d --- /dev/null +++ b/examples/resources/netbox_tenancy_tenant_group/resource.tf @@ -0,0 +1,9 @@ +resource "netbox_tenancy_tenant_group" "tenant_group_test" { + name = "TestTenantGroup" + slug = "TestTenantGroup" + + tag { + name = "tag1" + slug = "tag1" + } +} diff 
--git a/examples/resources/netbox_virtualization_interface/resource.tf b/examples/resources/netbox_virtualization_interface/resource.tf new file mode 100644 index 000000000..d0b4f8a93 --- /dev/null +++ b/examples/resources/netbox_virtualization_interface/resource.tf @@ -0,0 +1,49 @@ +resource "netbox_virtualization_interface" "interface_test" { + name = "default" + virtualmachine_id = netbox_virtualization_vm.vm_test.id + mac_address = "AA:AA:AA:AA:AA:AA" + mtu = 1500 + description = "Interface de test" + + custom_field { + name = "cf_boolean" + type = "boolean" + value = "true" + } + + custom_field { + name = "cf_date" + type = "date" + value = "2020-12-25" + } + + custom_field { + name = "cf_text" + type = "text" + value = "some text" + } + + custom_field { + name = "cf_integer" + type = "integer" + value = "10" + } + + custom_field { + name = "cf_selection" + type = "selection" + value = "1" + } + + custom_field { + name = "cf_url" + type = "url" + value = "https://github.com" + } + + custom_field { + name = "cf_multiple_selection" + type = "multiple" + value = "0,1" + } +} diff --git a/examples/resources/netbox_virtualization_vm/resource.tf b/examples/resources/netbox_virtualization_vm/resource.tf new file mode 100644 index 000000000..d2aeb0704 --- /dev/null +++ b/examples/resources/netbox_virtualization_vm/resource.tf @@ -0,0 +1,61 @@ +resource "netbox_virtualization_vm" "vm_test" { + name = "TestVm" + comments = "VM created by terraform" + vcpus = "2.00" + disk = 50 + memory = 16 + cluster_id = 1 + local_context_data = jsonencode( + { + hello = "world" + number = 1 + } + ) + + tag { + name = "tag1" + slug = "tag1" + } + + custom_field { + name = "cf_boolean" + type = "boolean" + value = "true" + } + + custom_field { + name = "cf_date" + type = "date" + value = "2020-12-25" + } + + custom_field { + name = "cf_text" + type = "text" + value = "some text" + } + + custom_field { + name = "cf_integer" + type = "integer" + value = "10" + } + + custom_field { + name 
= "cf_selection" + type = "selection" + value = "1" + } + + custom_field { + name = "cf_url" + type = "url" + value = "https://github.com" + } + + custom_field { + name = "cf_multiple_selection" + type = "multiple" + value = "0,1" + } +} diff --git a/go.mod b/go.mod index 3556e17a1..78d46bfa3 100644 --- a/go.mod +++ b/go.mod @@ -5,14 +5,20 @@ go 1.18 require ( github.com/go-openapi/runtime v0.24.1 github.com/go-openapi/strfmt v0.21.2 + github.com/hashicorp/terraform-plugin-docs v0.10.1 github.com/hashicorp/terraform-plugin-sdk/v2 v2.17.0 github.com/smutel/go-netbox v3.1.2+incompatible ) require ( + github.com/Masterminds/goutils v1.1.1 // indirect + github.com/Masterminds/semver/v3 v3.1.1 // indirect + github.com/Masterminds/sprig/v3 v3.2.2 // indirect github.com/agext/levenshtein v1.2.3 // indirect github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect + github.com/armon/go-radix v1.0.0 // indirect github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d // indirect + github.com/bgentry/speakeasy v0.1.0 // indirect github.com/fatih/color v1.13.0 // indirect github.com/go-openapi/analysis v0.21.3 // indirect github.com/go-openapi/errors v0.20.2 // indirect @@ -27,23 +33,31 @@ require ( github.com/google/go-cmp v0.5.8 // indirect github.com/google/uuid v1.3.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-checkpoint v0.5.0 // indirect + github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 // indirect github.com/hashicorp/go-hclog v1.2.1 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-plugin v1.4.4 // indirect github.com/hashicorp/go-uuid v1.0.3 // indirect github.com/hashicorp/go-version v1.5.0 // indirect + github.com/hashicorp/hc-install v0.3.2 // indirect github.com/hashicorp/hcl/v2 v2.12.0 // indirect github.com/hashicorp/logutils v1.0.0 // indirect + github.com/hashicorp/terraform-exec v0.16.1 // 
indirect + github.com/hashicorp/terraform-json v0.14.0 // indirect github.com/hashicorp/terraform-plugin-go v0.9.1 // indirect github.com/hashicorp/terraform-plugin-log v0.4.1 // indirect github.com/hashicorp/terraform-registry-address v0.0.0-20220510144317-d78f4a47ae27 // indirect github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734 // indirect github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87 // indirect + github.com/huandu/xstrings v1.3.2 // indirect + github.com/imdario/mergo v0.3.13 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.12 // indirect github.com/mattn/go-isatty v0.0.14 // indirect + github.com/mitchellh/cli v1.1.4 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-testing-interface v1.14.1 // indirect github.com/mitchellh/go-wordwrap v1.0.1 // indirect @@ -52,11 +66,16 @@ require ( github.com/oklog/run v1.1.0 // indirect github.com/oklog/ulid v1.3.1 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect + github.com/posener/complete v1.2.3 // indirect + github.com/russross/blackfriday v1.6.0 // indirect + github.com/shopspring/decimal v1.3.1 // indirect + github.com/spf13/cast v1.5.0 // indirect github.com/vmihailenco/msgpack v4.0.4+incompatible // indirect github.com/vmihailenco/msgpack/v4 v4.3.12 // indirect github.com/vmihailenco/tagparser v0.1.2 // indirect github.com/zclconf/go-cty v1.10.0 // indirect go.mongodb.org/mongo-driver v1.9.1 // indirect + golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e // indirect golang.org/x/net v0.0.0-20220607020251-c690dde0001d // indirect golang.org/x/sys v0.0.0-20220608164250-635b8c9b7f68 // indirect golang.org/x/text v0.3.7 // indirect diff --git a/go.sum b/go.sum index 4f2b165a3..e1a5cc58d 100644 --- a/go.sum +++ b/go.sum @@ -1,11 +1,27 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= 
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= +github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= +github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= +github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= +github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= +github.com/Masterminds/sprig/v3 v3.2.0/go.mod h1:tWhwTbUTndesPNeF0C900vKoq283u6zp4APT9vaF3SI= +github.com/Masterminds/sprig/v3 v3.2.2 h1:17jRggJu518dr3QaafizSXOjKYp94wKfABxUmyxvxX8= +github.com/Masterminds/sprig/v3 v3.2.2/go.mod h1:UoaO7Yp8KlPnJIYWTFkMaqPUYKTfGFPhxNuwnnxkKlk= +github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= +github.com/Microsoft/go-winio v0.4.16 h1:FtSW/jqD+l4ba5iPBj9CODVtgfYAD8w2wS923g/cFDk= +github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= +github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7 h1:YoJbenK9C67SkzkDfmQuVln04ygHj3vjZfd9FL+GmQQ= +github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/acomagu/bufpipe v1.0.3 h1:fxAGrHZTgQ9w5QqVItgzwj235/uYZYgbXitB+dLupOk= +github.com/acomagu/bufpipe v1.0.3/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4= github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo= 
github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM= github.com/apparentlymart/go-dump v0.0.0-20190214190832-042adf3cf4a0 h1:MzVXffFUye+ZcSR6opIgz9Co7WcDx6ZcY+RjfFHoA0I= @@ -13,9 +29,15 @@ github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/ github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw= github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d h1:Byv0BzEl3/e6D5CLfI0j/7hiIEtvGVFPCZ7Ei2oq8iQ= github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod 
h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= @@ -30,15 +52,29 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg= +github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= +github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= github.com/ghodss/yaml v1.0.0/go.mod 
h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= +github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4= +github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E= +github.com/go-git/go-billy/v5 v5.2.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= +github.com/go-git/go-billy/v5 v5.3.1 h1:CPiOUAzKtMRvolEKw+bG1PLRpT7D3LIs3/3ey4Aiu34= +github.com/go-git/go-billy/v5 v5.3.1/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= +github.com/go-git/go-git-fixtures/v4 v4.2.1/go.mod h1:K8zd3kDUAykwTdDCr+I0per6Y6vMiRR/nnVTBtavnB0= +github.com/go-git/go-git/v5 v5.4.2 h1:BXyZu9t0VkbiHtqrsvdq39UDhGJTl1h55VW6CSC4aY4= +github.com/go-git/go-git/v5 v5.4.2/go.mod h1:gQ1kArt6d+n+BGd+/B/I74HwRTLhth2+zti4ihgckDc= github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY= github.com/go-openapi/analysis v0.21.3 h1:CPEa+B2oYCkb+lIKB4xP6Ork8Gvh0GNg9dm/twI3+QA= github.com/go-openapi/analysis v0.21.3/go.mod h1:2rtHDVV21tLgvJd+eXu+ExiOhfMO4+dNb7496llyke0= @@ -137,24 +173,40 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFb github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-checkpoint v0.5.0 h1:MFYpPZCnQqQTE18jFwSII6eUQrD/oxMFp3mlgcqk5mU= +github.com/hashicorp/go-checkpoint v0.5.0/go.mod h1:7nfLNL10NsxqO4iWuW6tWW0HjZuDrwkBuEQsVcpCOgg= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp 
v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 h1:1/D3zfFHttUKaCaGKZ/dR2roBXv0vKbSCnssIldfQdI= github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320/go.mod h1:EiZBMaudVLy8fmjf9Npq1dq9RalhveqZG5w/yz3mHWs= github.com/hashicorp/go-hclog v1.2.1 h1:YQsLlGDJgwhXFpucSPyVbCBviQtjlHv3jLTlp8YmtEw= github.com/hashicorp/go-hclog v1.2.1/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-plugin v1.4.4 h1:NVdrSdFRt3SkZtNckJ6tog7gbpRrcbOjQi/rgF7JYWQ= github.com/hashicorp/go-plugin v1.4.4/go.mod h1:viDMjcLJuDui6pXb8U4HVfb8AamCWhHGUjr2IrTF67s= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.4.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.5.0 h1:O293SZ2Eg+AAYijkVK3jR786Am1bhDEh2GHT0tIVE5E= github.com/hashicorp/go-version v1.5.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/hc-install v0.3.2 h1:oiQdJZvXmkNcRcEOOfM5n+VTsvNjWQeOjfAoO6dKSH8= +github.com/hashicorp/hc-install v0.3.2/go.mod h1:xMG6Tr8Fw1WFjlxH0A9v61cW15pFwgEGqEz0V4jisHs= github.com/hashicorp/hcl/v2 v2.12.0 h1:PsYxySWpMD4KPaoJLnsHwtK5Qptvj/4Q6s0t4sUxZf4= github.com/hashicorp/hcl/v2 v2.12.0/go.mod h1:FwWsfWEjyV/CMj8s/gqAuiviY72rJ1/oayI9WftqcKg= github.com/hashicorp/logutils v1.0.0 
h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/terraform-exec v0.16.1 h1:NAwZFJW2L2SaCBVZoVaH8LPImLOGbPLkSHy0IYbs2uE= +github.com/hashicorp/terraform-exec v0.16.1/go.mod h1:aj0lVshy8l+MHhFNoijNHtqTJQI3Xlowv5EOsEaGO7M= +github.com/hashicorp/terraform-json v0.14.0 h1:sh9iZ1Y8IFJLx+xQiKHGud6/TSUCM0N8e17dKDpqV7s= +github.com/hashicorp/terraform-json v0.14.0/go.mod h1:5A9HIWPkk4e5aeeXIBbkcOvaZbIYnAIkEyqP2pNSckM= +github.com/hashicorp/terraform-plugin-docs v0.10.1 h1:jiVYfhJ/hVXDAQN2XjLK3WH1A/YHgFCrFXPpxibvmjc= +github.com/hashicorp/terraform-plugin-docs v0.10.1/go.mod h1:47ZcsxMUJxAjGzHf+dZ9q78oYf4PeJxO1N+i5XDtXBc= github.com/hashicorp/terraform-plugin-go v0.9.1 h1:vXdHaQ6aqL+OF076nMSBV+JKPdmXlzG5mzVDD04WyPs= github.com/hashicorp/terraform-plugin-go v0.9.1/go.mod h1:ItjVSlQs70otlzcCwlPcU8FRXLdO973oYFRZwAOxy8M= github.com/hashicorp/terraform-plugin-log v0.4.1 h1:xpbmVhvuU3mgHzLetOmx9pkOL2rmgpu302XxddON6eo= @@ -167,18 +219,31 @@ github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734 h1:HKL github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734/go.mod h1:kNDNcF7sN4DocDLBkQYz73HGKwN1ANB1blq4lIYLYvg= github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87 h1:xixZ2bWeofWV68J+x6AzmKuVM/JWCQwkWm6GW/MUR6I= github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= +github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/huandu/xstrings v1.3.2 h1:L18LIDzqlW6xN2rEkpdV8+oL/IXWJ1APd+vsdYy4Wdw= +github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.13 
h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= +github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= +github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= +github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= +github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351 h1:DowS9hvgyYSX4TO5NpyC606/Z4SxnNYbT+WX27or6Ck= +github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pty 
v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -192,14 +257,22 @@ github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0 github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= +github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mitchellh/cli v1.1.4 h1:qj8czE26AU4PbiaPXK5uVmMSM+V5BYsFBiM9HhGRLUA= +github.com/mitchellh/cli v1.1.4/go.mod h1:vTLESy5mRhKOs9KDp0/RATawxP1UqBmdrpVRMnpcvKQ= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir 
v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= @@ -210,10 +283,10 @@ github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nsf/jsondiff v0.0.0-20200515183724-f29ed568f4ce h1:RPclfga2SEJmgMmz2k+Mg7cowZ8yv4Trqw9UsJby758= github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= @@ -228,19 +301,32 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod 
h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/posener/complete v1.2.3 h1:NP0eAhjcjImqslEwo/1hq7gpajME0fTLTezBKDqfXqo= +github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= +github.com/russross/blackfriday v1.6.0 h1:KqfZb0pUVN2lYqZUYRddxF4OR8ZMURnJIG5Y3VRLtww= +github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY= +github.com/sebdah/goldie v1.0.0/go.mod h1:jXP4hmWywNEwZzhMuv2ccnqTSFpuq8iyQhtQdkkZBH4= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= +github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/smutel/go-netbox v3.1.1+incompatible h1:H+4/jVhl0iaIKGch2tdDLDUArsDg+57TTc1phmm+mNw= 
-github.com/smutel/go-netbox v3.1.1+incompatible/go.mod h1:UlNOimilX5qQkyVvFaDgajexiCRjVqzh1i0r9kQfr+M= github.com/smutel/go-netbox v3.1.2+incompatible h1:Ku7N3ZbaoUMuhSqp8jxsBy3uiNmRdvJUkeWUjDKE0PA= github.com/smutel/go-netbox v3.1.2+incompatible/go.mod h1:UlNOimilX5qQkyVvFaDgajexiCRjVqzh1i0r9kQfr+M= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= +github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= @@ -248,6 +334,7 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= @@ -263,6 +350,8 @@ github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+ github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= github.com/vmihailenco/tagparser v0.1.2 h1:gnjoVuB/kljJ5wICEEOpx98oXMWPLj22G67Vbd1qPqc= github.com/vmihailenco/tagparser v0.1.2/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= +github.com/xanzy/ssh-agent v0.3.0 h1:wUMzuKtKilRgBAD1sUb8gOwwRr2FGoBVumcjoOACClI= 
+github.com/xanzy/ssh-agent v0.3.0/go.mod h1:3s9xbODqPuuhK9JV1R321M/FlMZSBvE5aY6eAcqrDh0= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= @@ -280,12 +369,20 @@ go.mongodb.org/mongo-driver v1.9.1 h1:m078y9v7sBItkt1aaoe2YlvWEXcD263e1a4E1fBrJ1 go.mongodb.org/mongo-driver v1.9.1/go.mod h1:0sQWfOeY63QTntERDJJ/0SuKK0T1uVSgKCuAROlKEPY= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto 
v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e h1:T8NU3HyQ8ClP4SEE+KbFlg6n0NhuTsN4MyznaarGsZM= +golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= @@ -303,6 +400,8 @@ golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210326060303-6b1517762897/go.mod h1:uSPa2vr4CLtc/ILN5odXGNXS6mhrKVzTaCXzk9m6W3k= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= @@ -326,16 +425,22 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502175342-a43fa875dd82/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210502180810-71e4cd670f79/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -400,18 +505,25 @@ google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscL google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/main.go b/main.go index 8cd6c8709..ebf1281f9 100644 --- a/main.go +++ b/main.go @@ -6,6 +6,12 @@ import ( "github.com/smutel/terraform-provider-netbox/v4/netbox" ) +// Run "go generate" to format example terraform files and generate the docs for the registry/website + +// Run the docs generation tool, check its repository for more information on how it works and how docs +// can be customized. 
+//go:generate go run github.com/hashicorp/terraform-plugin-docs/cmd/tfplugindocs + func main() { opts := &plugin.ServeOpts{ ProviderFunc: func() *schema.Provider { diff --git a/netbox/data_netbox_dcim_platform.go b/netbox/data_netbox_dcim_platform.go index 088744a7e..c3e5b8c68 100644 --- a/netbox/data_netbox_dcim_platform.go +++ b/netbox/data_netbox_dcim_platform.go @@ -13,16 +13,19 @@ import ( func dataNetboxDcimPlatform() *schema.Resource { return &schema.Resource{ - Read: dataNetboxDcimPlatformRead, + Description: "Get info about platform (dcim module) from netbox.", + Read: dataNetboxDcimPlatformRead, Schema: map[string]*schema.Schema{ "content_type": { - Type: schema.TypeString, - Computed: true, + Description: "Content type of this platform (dcim module).", + Type: schema.TypeString, + Computed: true, }, "slug": { - Type: schema.TypeString, - Required: true, + Description: "Slug of this platform (dcim module).", + Type: schema.TypeString, + Required: true, ValidateFunc: validation.StringMatch( regexp.MustCompile("^[-a-zA-Z0-9_]{1,50}$"), "Must be like ^[-a-zA-Z0-9_]{1,50}$"), diff --git a/netbox/data_netbox_dcim_site.go b/netbox/data_netbox_dcim_site.go index 4a8b506f0..30b2500b7 100644 --- a/netbox/data_netbox_dcim_site.go +++ b/netbox/data_netbox_dcim_site.go @@ -13,12 +13,14 @@ import ( func dataNetboxDcimSite() *schema.Resource { return &schema.Resource{ - Read: dataNetboxDcimSiteRead, + Description: "Get info about site (dcim module) from netbox.", + Read: dataNetboxDcimSiteRead, Schema: map[string]*schema.Schema{ "content_type": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "The content type of this site (dcim module).", }, "slug": { Type: schema.TypeString, @@ -26,6 +28,7 @@ func dataNetboxDcimSite() *schema.Resource { ValidateFunc: validation.StringMatch( regexp.MustCompile("^[-a-zA-Z0-9_]{1,50}$"), "Must be like ^[-a-zA-Z0-9_]{1,50}$"), + Description: "The slug of the site (dcim module).", 
}, }, } diff --git a/netbox/data_netbox_ipam_aggregate.go b/netbox/data_netbox_ipam_aggregate.go index 1fa4acb9c..ddd68014c 100644 --- a/netbox/data_netbox_ipam_aggregate.go +++ b/netbox/data_netbox_ipam_aggregate.go @@ -12,21 +12,25 @@ import ( func dataNetboxIpamAggregate() *schema.Resource { return &schema.Resource{ - Read: dataNetboxIpamAggregateRead, + Description: "Get info about aggregate (ipam module) from Netbox.", + Read: dataNetboxIpamAggregateRead, Schema: map[string]*schema.Schema{ "content_type": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "The content type of this aggregate (ipam module).", }, "prefix": { Type: schema.TypeString, Required: true, ValidateFunc: validation.IsCIDRNetwork(0, 256), + Description: "The prefix (with mask) used for this aggregate (ipam module).", }, "rir_id": { - Type: schema.TypeInt, - Required: true, + Type: schema.TypeInt, + Required: true, + Description: "The RIR id linked to this aggregate (ipam module).", }, }, } diff --git a/netbox/data_netbox_ipam_ip_addresses.go b/netbox/data_netbox_ipam_ip_addresses.go index e97dfca2c..5f93ec395 100644 --- a/netbox/data_netbox_ipam_ip_addresses.go +++ b/netbox/data_netbox_ipam_ip_addresses.go @@ -12,17 +12,20 @@ import ( func dataNetboxIpamIPAddresses() *schema.Resource { return &schema.Resource{ - Read: dataNetboxIpamIPAddressesRead, + Description: "Get info about IP addresses (ipam module) from netbox.", + Read: dataNetboxIpamIPAddressesRead, Schema: map[string]*schema.Schema{ "content_type": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "The content type of this ipam IP addresses (ipam module).", }, "address": { Type: schema.TypeString, Required: true, ValidateFunc: validation.IsCIDR, + Description: "The address (with mask) of the ipam IP addresses (ipam module).", }, }, } diff --git a/netbox/data_netbox_ipam_role.go b/netbox/data_netbox_ipam_role.go index 
4b425f1d4..e76219c85 100644 --- a/netbox/data_netbox_ipam_role.go +++ b/netbox/data_netbox_ipam_role.go @@ -13,12 +13,14 @@ import ( func dataNetboxIpamRole() *schema.Resource { return &schema.Resource{ - Read: dataNetboxIpamRoleRead, + Description: "Get info about role (ipam module) from netbox.", + Read: dataNetboxIpamRoleRead, Schema: map[string]*schema.Schema{ "content_type": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "The content type of this role (ipam module).", }, "slug": { Type: schema.TypeString, @@ -26,6 +28,7 @@ func dataNetboxIpamRole() *schema.Resource { ValidateFunc: validation.StringMatch( regexp.MustCompile("^[-a-zA-Z0-9_]{1,50}$"), "Must be like ^[-a-zA-Z0-9_]{1,50}$"), + Description: "The slug of the role (ipam module).", }, }, } diff --git a/netbox/data_netbox_ipam_service.go b/netbox/data_netbox_ipam_service.go index fa3ccfc36..16343995e 100644 --- a/netbox/data_netbox_ipam_service.go +++ b/netbox/data_netbox_ipam_service.go @@ -12,37 +12,44 @@ import ( func dataNetboxIpamService() *schema.Resource { return &schema.Resource{ - Read: dataNetboxIpamServiceRead, + Description: "Get info about a service (ipam module) from netbox.", + Read: dataNetboxIpamServiceRead, Schema: map[string]*schema.Schema{ "content_type": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "The content type of this service (ipam module).", }, "device_id": { Type: schema.TypeInt, Optional: true, ConflictsWith: []string{"virtualmachine_id"}, + Description: "ID of the device linked to this service (ipam module).", }, "name": { Type: schema.TypeString, Required: true, ValidateFunc: validation.StringLenBetween(1, 50), + Description: "The name of this service (ipam module).", }, "port": { Type: schema.TypeInt, Required: true, ValidateFunc: validation.IntBetween(1, 65535), + Description: "The port of this service (ipam module).", }, "protocol": { Type: 
schema.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{"tcp", "udp"}, false), + Description: "The protocol of this service (ipam module) (tcp or udp).", }, "virtualmachine_id": { Type: schema.TypeInt, Optional: true, ConflictsWith: []string{"device_id"}, + Description: "ID of the VM linked to this service (ipam module).", }, }, } diff --git a/netbox/data_netbox_ipam_vlan.go b/netbox/data_netbox_ipam_vlan.go index 860280315..5497577f0 100644 --- a/netbox/data_netbox_ipam_vlan.go +++ b/netbox/data_netbox_ipam_vlan.go @@ -11,20 +11,24 @@ import ( func dataNetboxIpamVlan() *schema.Resource { return &schema.Resource{ - Read: dataNetboxIpamVlanRead, + Description: "Get info about vlan (ipam module) from netbox.", + Read: dataNetboxIpamVlanRead, Schema: map[string]*schema.Schema{ "content_type": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "The content type of this vlan (ipam module).", }, "vlan_id": { - Type: schema.TypeInt, - Required: true, + Type: schema.TypeInt, + Required: true, + Description: "The ID of the vlan (ipam module).", }, "vlan_group_id": { - Type: schema.TypeInt, - Optional: true, + Type: schema.TypeInt, + Optional: true, + Description: "ID of the vlan group where this vlan is attached to.", }, }, } diff --git a/netbox/data_netbox_ipam_vlan_group.go b/netbox/data_netbox_ipam_vlan_group.go index 548348db2..f89e959dd 100644 --- a/netbox/data_netbox_ipam_vlan_group.go +++ b/netbox/data_netbox_ipam_vlan_group.go @@ -13,12 +13,14 @@ import ( func dataNetboxIpamVlanGroup() *schema.Resource { return &schema.Resource{ - Read: dataNetboxIpamVlanGroupRead, + Description: "Get info about a vlan group (ipam module) from netbox.", + Read: dataNetboxIpamVlanGroupRead, Schema: map[string]*schema.Schema{ "content_type": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "The content type of this vlan group (ipam module).", }, 
"slug": { Type: schema.TypeString, @@ -26,6 +28,7 @@ func dataNetboxIpamVlanGroup() *schema.Resource { ValidateFunc: validation.StringMatch( regexp.MustCompile("^[-a-zA-Z0-9_]{1,50}$"), "Must be like ^[-a-zA-Z0-9_]{1,50}$"), + Description: "The slug of the vlan group (ipam module).", }, }, } diff --git a/netbox/data_netbox_json_circuits_circuit_terminations_list.go b/netbox/data_netbox_json_circuits_circuit_terminations_list.go index 040f26285..3bf81b890 100644 --- a/netbox/data_netbox_json_circuits_circuit_terminations_list.go +++ b/netbox/data_netbox_json_circuits_circuit_terminations_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/circuits" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/circuits" ) func dataNetboxJSONCircuitsCircuitTerminationsList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONCircuitsCircuitTerminationsListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the circuits_circuit_terminations_list Netbox endpoint.", + Read: dataNetboxJSONCircuitsCircuitTerminationsListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. 
If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONCircuitsCircuitTerminationsListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := circuits.NewCircuitsCircuitTerminationsListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := circuits.NewCircuitsCircuitTerminationsListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Circuits.CircuitsCircuitTerminationsList(params, nil) - if err != nil { - return err - } + list, err := client.Circuits.CircuitsCircuitTerminationsList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONCircuitsCircuitTerminationsList") + d.Set("json", string(j)) + d.SetId("NetboxJSONCircuitsCircuitTerminationsList") - return nil + return nil } diff --git a/netbox/data_netbox_json_circuits_circuit_types_list.go b/netbox/data_netbox_json_circuits_circuit_types_list.go index 9e21e4226..0ffcbe2ee 100644 --- a/netbox/data_netbox_json_circuits_circuit_types_list.go +++ b/netbox/data_netbox_json_circuits_circuit_types_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/circuits" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/circuits" ) func dataNetboxJSONCircuitsCircuitTypesList() *schema.Resource { - return &schema.Resource{ - Read: 
dataNetboxJSONCircuitsCircuitTypesListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the circuits_circuit_types_list Netbox endpoint.", + Read: dataNetboxJSONCircuitsCircuitTypesListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONCircuitsCircuitTypesListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := circuits.NewCircuitsCircuitTypesListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := circuits.NewCircuitsCircuitTypesListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Circuits.CircuitsCircuitTypesList(params, nil) - if err != nil { - return err - } + list, err := client.Circuits.CircuitsCircuitTypesList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONCircuitsCircuitTypesList") + d.Set("json", string(j)) + d.SetId("NetboxJSONCircuitsCircuitTypesList") - return nil + return nil } diff --git a/netbox/data_netbox_json_circuits_circuits_list.go b/netbox/data_netbox_json_circuits_circuits_list.go index f3b157e81..23ffe3f24 100644 --- a/netbox/data_netbox_json_circuits_circuits_list.go +++ b/netbox/data_netbox_json_circuits_circuits_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/circuits" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/circuits" ) func dataNetboxJSONCircuitsCircuitsList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONCircuitsCircuitsListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the circuits_circuits_list Netbox endpoint.", + Read: dataNetboxJSONCircuitsCircuitsListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONCircuitsCircuitsListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := circuits.NewCircuitsCircuitsListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := circuits.NewCircuitsCircuitsListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Circuits.CircuitsCircuitsList(params, nil) - if err != nil { - return err - } + list, err := client.Circuits.CircuitsCircuitsList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONCircuitsCircuitsList") + d.Set("json", string(j)) + 
d.SetId("NetboxJSONCircuitsCircuitsList") - return nil + return nil } diff --git a/netbox/data_netbox_json_circuits_provider_networks_list.go b/netbox/data_netbox_json_circuits_provider_networks_list.go index f924266ec..be7cd5c57 100644 --- a/netbox/data_netbox_json_circuits_provider_networks_list.go +++ b/netbox/data_netbox_json_circuits_provider_networks_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/circuits" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/circuits" ) func dataNetboxJSONCircuitsProviderNetworksList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONCircuitsProviderNetworksListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the circuits_provider_networks_list Netbox endpoint.", + Read: dataNetboxJSONCircuitsProviderNetworksListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. 
If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONCircuitsProviderNetworksListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := circuits.NewCircuitsProviderNetworksListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := circuits.NewCircuitsProviderNetworksListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Circuits.CircuitsProviderNetworksList(params, nil) - if err != nil { - return err - } + list, err := client.Circuits.CircuitsProviderNetworksList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONCircuitsProviderNetworksList") + d.Set("json", string(j)) + d.SetId("NetboxJSONCircuitsProviderNetworksList") - return nil + return nil } diff --git a/netbox/data_netbox_json_circuits_providers_list.go b/netbox/data_netbox_json_circuits_providers_list.go index 5fad30014..fab844f5d 100644 --- a/netbox/data_netbox_json_circuits_providers_list.go +++ b/netbox/data_netbox_json_circuits_providers_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/circuits" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/circuits" ) func dataNetboxJSONCircuitsProvidersList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONCircuitsProvidersListRead, - - Schema: 
map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the circuits_providers_list Netbox endpoint.", + Read: dataNetboxJSONCircuitsProvidersListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONCircuitsProvidersListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := circuits.NewCircuitsProvidersListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := circuits.NewCircuitsProvidersListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Circuits.CircuitsProvidersList(params, nil) - if err != nil { - return err - } + list, err := client.Circuits.CircuitsProvidersList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONCircuitsProvidersList") + d.Set("json", string(j)) + d.SetId("NetboxJSONCircuitsProvidersList") - return nil + return nil } diff --git a/netbox/data_netbox_json_dcim_cables_list.go b/netbox/data_netbox_json_dcim_cables_list.go index fdb35d4d9..3fe8e9102 100644 --- a/netbox/data_netbox_json_dcim_cables_list.go +++ b/netbox/data_netbox_json_dcim_cables_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient 
"github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/dcim" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/dcim" ) func dataNetboxJSONDcimCablesList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONDcimCablesListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the dcim_cables_list Netbox endpoint.", + Read: dataNetboxJSONDcimCablesListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONDcimCablesListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := dcim.NewDcimCablesListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := dcim.NewDcimCablesListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Dcim.DcimCablesList(params, nil) - if err != nil { - return err - } + list, err := client.Dcim.DcimCablesList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONDcimCablesList") + d.Set("json", string(j)) + d.SetId("NetboxJSONDcimCablesList") - return nil + return nil } diff --git a/netbox/data_netbox_json_dcim_console_port_templates_list.go 
b/netbox/data_netbox_json_dcim_console_port_templates_list.go index c0474e7be..8375e213c 100644 --- a/netbox/data_netbox_json_dcim_console_port_templates_list.go +++ b/netbox/data_netbox_json_dcim_console_port_templates_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/dcim" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/dcim" ) func dataNetboxJSONDcimConsolePortTemplatesList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONDcimConsolePortTemplatesListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the dcim_console_port_templates_list Netbox endpoint.", + Read: dataNetboxJSONDcimConsolePortTemplatesListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. 
If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONDcimConsolePortTemplatesListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := dcim.NewDcimConsolePortTemplatesListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := dcim.NewDcimConsolePortTemplatesListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Dcim.DcimConsolePortTemplatesList(params, nil) - if err != nil { - return err - } + list, err := client.Dcim.DcimConsolePortTemplatesList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONDcimConsolePortTemplatesList") + d.Set("json", string(j)) + d.SetId("NetboxJSONDcimConsolePortTemplatesList") - return nil + return nil } diff --git a/netbox/data_netbox_json_dcim_console_ports_list.go b/netbox/data_netbox_json_dcim_console_ports_list.go index 42a54ff5c..0ccb43eed 100644 --- a/netbox/data_netbox_json_dcim_console_ports_list.go +++ b/netbox/data_netbox_json_dcim_console_ports_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/dcim" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/dcim" ) func dataNetboxJSONDcimConsolePortsList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONDcimConsolePortsListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: 
schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the dcim_console_ports_list Netbox endpoint.", + Read: dataNetboxJSONDcimConsolePortsListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONDcimConsolePortsListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := dcim.NewDcimConsolePortsListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := dcim.NewDcimConsolePortsListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Dcim.DcimConsolePortsList(params, nil) - if err != nil { - return err - } + list, err := client.Dcim.DcimConsolePortsList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONDcimConsolePortsList") + d.Set("json", string(j)) + d.SetId("NetboxJSONDcimConsolePortsList") - return nil + return nil } diff --git a/netbox/data_netbox_json_dcim_console_server_port_templates_list.go b/netbox/data_netbox_json_dcim_console_server_port_templates_list.go index 72f855571..d9a095ad3 100644 --- a/netbox/data_netbox_json_dcim_console_server_port_templates_list.go +++ b/netbox/data_netbox_json_dcim_console_server_port_templates_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient 
"github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/dcim" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/dcim" ) func dataNetboxJSONDcimConsoleServerPortTemplatesList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONDcimConsoleServerPortTemplatesListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the dcim_console_server_port_templates_list Netbox endpoint.", + Read: dataNetboxJSONDcimConsoleServerPortTemplatesListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONDcimConsoleServerPortTemplatesListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := dcim.NewDcimConsoleServerPortTemplatesListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := dcim.NewDcimConsoleServerPortTemplatesListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Dcim.DcimConsoleServerPortTemplatesList(params, nil) - if err != nil { - return err - } + list, err := client.Dcim.DcimConsoleServerPortTemplatesList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - 
d.SetId("NetboxJSONDcimConsoleServerPortTemplatesList") + d.Set("json", string(j)) + d.SetId("NetboxJSONDcimConsoleServerPortTemplatesList") - return nil + return nil } diff --git a/netbox/data_netbox_json_dcim_console_server_ports_list.go b/netbox/data_netbox_json_dcim_console_server_ports_list.go index 1fd593e91..ad069f953 100644 --- a/netbox/data_netbox_json_dcim_console_server_ports_list.go +++ b/netbox/data_netbox_json_dcim_console_server_ports_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/dcim" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/dcim" ) func dataNetboxJSONDcimConsoleServerPortsList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONDcimConsoleServerPortsListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the dcim_console_server_ports_list Netbox endpoint.", + Read: dataNetboxJSONDcimConsoleServerPortsListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. 
If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONDcimConsoleServerPortsListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := dcim.NewDcimConsoleServerPortsListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := dcim.NewDcimConsoleServerPortsListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Dcim.DcimConsoleServerPortsList(params, nil) - if err != nil { - return err - } + list, err := client.Dcim.DcimConsoleServerPortsList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONDcimConsoleServerPortsList") + d.Set("json", string(j)) + d.SetId("NetboxJSONDcimConsoleServerPortsList") - return nil + return nil } diff --git a/netbox/data_netbox_json_dcim_device_bay_templates_list.go b/netbox/data_netbox_json_dcim_device_bay_templates_list.go index 34ae74c4e..d2abd8b4a 100644 --- a/netbox/data_netbox_json_dcim_device_bay_templates_list.go +++ b/netbox/data_netbox_json_dcim_device_bay_templates_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/dcim" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/dcim" ) func dataNetboxJSONDcimDeviceBayTemplatesList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONDcimDeviceBayTemplatesListRead, - - Schema: 
map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the dcim_device_bay_templates_list Netbox endpoint.", + Read: dataNetboxJSONDcimDeviceBayTemplatesListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONDcimDeviceBayTemplatesListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := dcim.NewDcimDeviceBayTemplatesListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := dcim.NewDcimDeviceBayTemplatesListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Dcim.DcimDeviceBayTemplatesList(params, nil) - if err != nil { - return err - } + list, err := client.Dcim.DcimDeviceBayTemplatesList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONDcimDeviceBayTemplatesList") + d.Set("json", string(j)) + d.SetId("NetboxJSONDcimDeviceBayTemplatesList") - return nil + return nil } diff --git a/netbox/data_netbox_json_dcim_device_bays_list.go b/netbox/data_netbox_json_dcim_device_bays_list.go index c11273dd2..0fc821e20 100644 --- a/netbox/data_netbox_json_dcim_device_bays_list.go +++ b/netbox/data_netbox_json_dcim_device_bays_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 
- netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/dcim" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/dcim" ) func dataNetboxJSONDcimDeviceBaysList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONDcimDeviceBaysListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the dcim_device_bays_list Netbox endpoint.", + Read: dataNetboxJSONDcimDeviceBaysListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONDcimDeviceBaysListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := dcim.NewDcimDeviceBaysListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := dcim.NewDcimDeviceBaysListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Dcim.DcimDeviceBaysList(params, nil) - if err != nil { - return err - } + list, err := client.Dcim.DcimDeviceBaysList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONDcimDeviceBaysList") + d.Set("json", string(j)) + d.SetId("NetboxJSONDcimDeviceBaysList") - return nil + return nil } diff --git 
a/netbox/data_netbox_json_dcim_device_roles_list.go b/netbox/data_netbox_json_dcim_device_roles_list.go index d93ed8768..7bcc0ad44 100644 --- a/netbox/data_netbox_json_dcim_device_roles_list.go +++ b/netbox/data_netbox_json_dcim_device_roles_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/dcim" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/dcim" ) func dataNetboxJSONDcimDeviceRolesList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONDcimDeviceRolesListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the dcim_device_roles_list Netbox endpoint.", + Read: dataNetboxJSONDcimDeviceRolesListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. 
If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONDcimDeviceRolesListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := dcim.NewDcimDeviceRolesListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := dcim.NewDcimDeviceRolesListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Dcim.DcimDeviceRolesList(params, nil) - if err != nil { - return err - } + list, err := client.Dcim.DcimDeviceRolesList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONDcimDeviceRolesList") + d.Set("json", string(j)) + d.SetId("NetboxJSONDcimDeviceRolesList") - return nil + return nil } diff --git a/netbox/data_netbox_json_dcim_device_types_list.go b/netbox/data_netbox_json_dcim_device_types_list.go index 89c5f76ae..d77dfe824 100644 --- a/netbox/data_netbox_json_dcim_device_types_list.go +++ b/netbox/data_netbox_json_dcim_device_types_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/dcim" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/dcim" ) func dataNetboxJSONDcimDeviceTypesList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONDcimDeviceTypesListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - 
Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the dcim_device_types_list Netbox endpoint.", + Read: dataNetboxJSONDcimDeviceTypesListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONDcimDeviceTypesListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := dcim.NewDcimDeviceTypesListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := dcim.NewDcimDeviceTypesListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Dcim.DcimDeviceTypesList(params, nil) - if err != nil { - return err - } + list, err := client.Dcim.DcimDeviceTypesList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONDcimDeviceTypesList") + d.Set("json", string(j)) + d.SetId("NetboxJSONDcimDeviceTypesList") - return nil + return nil } diff --git a/netbox/data_netbox_json_dcim_devices_list.go b/netbox/data_netbox_json_dcim_devices_list.go index f3741a745..8a1e103cc 100644 --- a/netbox/data_netbox_json_dcim_devices_list.go +++ b/netbox/data_netbox_json_dcim_devices_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/dcim" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient 
"github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/dcim" ) func dataNetboxJSONDcimDevicesList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONDcimDevicesListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the dcim_devices_list Netbox endpoint.", + Read: dataNetboxJSONDcimDevicesListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONDcimDevicesListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := dcim.NewDcimDevicesListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := dcim.NewDcimDevicesListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Dcim.DcimDevicesList(params, nil) - if err != nil { - return err - } + list, err := client.Dcim.DcimDevicesList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONDcimDevicesList") + d.Set("json", string(j)) + d.SetId("NetboxJSONDcimDevicesList") - return nil + return nil } diff --git a/netbox/data_netbox_json_dcim_front_port_templates_list.go b/netbox/data_netbox_json_dcim_front_port_templates_list.go index 0b6ab6327..2536b6a13 100644 --- a/netbox/data_netbox_json_dcim_front_port_templates_list.go +++ 
b/netbox/data_netbox_json_dcim_front_port_templates_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/dcim" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/dcim" ) func dataNetboxJSONDcimFrontPortTemplatesList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONDcimFrontPortTemplatesListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the dcim_front_port_templates_list Netbox endpoint.", + Read: dataNetboxJSONDcimFrontPortTemplatesListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. 
If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONDcimFrontPortTemplatesListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := dcim.NewDcimFrontPortTemplatesListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := dcim.NewDcimFrontPortTemplatesListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Dcim.DcimFrontPortTemplatesList(params, nil) - if err != nil { - return err - } + list, err := client.Dcim.DcimFrontPortTemplatesList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONDcimFrontPortTemplatesList") + d.Set("json", string(j)) + d.SetId("NetboxJSONDcimFrontPortTemplatesList") - return nil + return nil } diff --git a/netbox/data_netbox_json_dcim_front_ports_list.go b/netbox/data_netbox_json_dcim_front_ports_list.go index 9751de4e2..4a531524d 100644 --- a/netbox/data_netbox_json_dcim_front_ports_list.go +++ b/netbox/data_netbox_json_dcim_front_ports_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/dcim" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/dcim" ) func dataNetboxJSONDcimFrontPortsList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONDcimFrontPortsListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: 
true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the dcim_front_ports_list Netbox endpoint.", + Read: dataNetboxJSONDcimFrontPortsListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONDcimFrontPortsListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := dcim.NewDcimFrontPortsListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := dcim.NewDcimFrontPortsListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Dcim.DcimFrontPortsList(params, nil) - if err != nil { - return err - } + list, err := client.Dcim.DcimFrontPortsList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONDcimFrontPortsList") + d.Set("json", string(j)) + d.SetId("NetboxJSONDcimFrontPortsList") - return nil + return nil } diff --git a/netbox/data_netbox_json_dcim_interface_templates_list.go b/netbox/data_netbox_json_dcim_interface_templates_list.go index 51d6852b9..b1f2b7849 100644 --- a/netbox/data_netbox_json_dcim_interface_templates_list.go +++ b/netbox/data_netbox_json_dcim_interface_templates_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/dcim" + 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/dcim" ) func dataNetboxJSONDcimInterfaceTemplatesList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONDcimInterfaceTemplatesListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the dcim_interface_templates_list Netbox endpoint.", + Read: dataNetboxJSONDcimInterfaceTemplatesListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONDcimInterfaceTemplatesListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := dcim.NewDcimInterfaceTemplatesListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := dcim.NewDcimInterfaceTemplatesListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Dcim.DcimInterfaceTemplatesList(params, nil) - if err != nil { - return err - } + list, err := client.Dcim.DcimInterfaceTemplatesList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONDcimInterfaceTemplatesList") + d.Set("json", string(j)) + d.SetId("NetboxJSONDcimInterfaceTemplatesList") - return nil + return nil } diff --git a/netbox/data_netbox_json_dcim_interfaces_list.go 
b/netbox/data_netbox_json_dcim_interfaces_list.go index 7066770f3..68a149a62 100644 --- a/netbox/data_netbox_json_dcim_interfaces_list.go +++ b/netbox/data_netbox_json_dcim_interfaces_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/dcim" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/dcim" ) func dataNetboxJSONDcimInterfacesList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONDcimInterfacesListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the dcim_interfaces_list Netbox endpoint.", + Read: dataNetboxJSONDcimInterfacesListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. 
If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONDcimInterfacesListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := dcim.NewDcimInterfacesListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := dcim.NewDcimInterfacesListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Dcim.DcimInterfacesList(params, nil) - if err != nil { - return err - } + list, err := client.Dcim.DcimInterfacesList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONDcimInterfacesList") + d.Set("json", string(j)) + d.SetId("NetboxJSONDcimInterfacesList") - return nil + return nil } diff --git a/netbox/data_netbox_json_dcim_inventory_items_list.go b/netbox/data_netbox_json_dcim_inventory_items_list.go index a33e3b572..8d66ec883 100644 --- a/netbox/data_netbox_json_dcim_inventory_items_list.go +++ b/netbox/data_netbox_json_dcim_inventory_items_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/dcim" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/dcim" ) func dataNetboxJSONDcimInventoryItemsList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONDcimInventoryItemsListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - 
"json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the dcim_inventory_items_list Netbox endpoint.", + Read: dataNetboxJSONDcimInventoryItemsListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONDcimInventoryItemsListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := dcim.NewDcimInventoryItemsListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := dcim.NewDcimInventoryItemsListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Dcim.DcimInventoryItemsList(params, nil) - if err != nil { - return err - } + list, err := client.Dcim.DcimInventoryItemsList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONDcimInventoryItemsList") + d.Set("json", string(j)) + d.SetId("NetboxJSONDcimInventoryItemsList") - return nil + return nil } diff --git a/netbox/data_netbox_json_dcim_locations_list.go b/netbox/data_netbox_json_dcim_locations_list.go index a56552502..e42d459ad 100644 --- a/netbox/data_netbox_json_dcim_locations_list.go +++ b/netbox/data_netbox_json_dcim_locations_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/dcim" + 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/dcim" ) func dataNetboxJSONDcimLocationsList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONDcimLocationsListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the dcim_locations_list Netbox endpoint.", + Read: dataNetboxJSONDcimLocationsListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONDcimLocationsListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := dcim.NewDcimLocationsListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := dcim.NewDcimLocationsListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Dcim.DcimLocationsList(params, nil) - if err != nil { - return err - } + list, err := client.Dcim.DcimLocationsList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONDcimLocationsList") + d.Set("json", string(j)) + d.SetId("NetboxJSONDcimLocationsList") - return nil + return nil } diff --git a/netbox/data_netbox_json_dcim_manufacturers_list.go b/netbox/data_netbox_json_dcim_manufacturers_list.go index eb218b777..40be2edb4 100644 --- 
a/netbox/data_netbox_json_dcim_manufacturers_list.go +++ b/netbox/data_netbox_json_dcim_manufacturers_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/dcim" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/dcim" ) func dataNetboxJSONDcimManufacturersList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONDcimManufacturersListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the dcim_manufacturers_list Netbox endpoint.", + Read: dataNetboxJSONDcimManufacturersListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. 
If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONDcimManufacturersListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := dcim.NewDcimManufacturersListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := dcim.NewDcimManufacturersListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Dcim.DcimManufacturersList(params, nil) - if err != nil { - return err - } + list, err := client.Dcim.DcimManufacturersList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONDcimManufacturersList") + d.Set("json", string(j)) + d.SetId("NetboxJSONDcimManufacturersList") - return nil + return nil } diff --git a/netbox/data_netbox_json_dcim_platforms_list.go b/netbox/data_netbox_json_dcim_platforms_list.go index efb2ad7bd..e4e66bc4a 100644 --- a/netbox/data_netbox_json_dcim_platforms_list.go +++ b/netbox/data_netbox_json_dcim_platforms_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/dcim" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/dcim" ) func dataNetboxJSONDcimPlatformsList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONDcimPlatformsListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: 
schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the dcim_platforms_list Netbox endpoint.", + Read: dataNetboxJSONDcimPlatformsListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONDcimPlatformsListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := dcim.NewDcimPlatformsListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := dcim.NewDcimPlatformsListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Dcim.DcimPlatformsList(params, nil) - if err != nil { - return err - } + list, err := client.Dcim.DcimPlatformsList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONDcimPlatformsList") + d.Set("json", string(j)) + d.SetId("NetboxJSONDcimPlatformsList") - return nil + return nil } diff --git a/netbox/data_netbox_json_dcim_power_feeds_list.go b/netbox/data_netbox_json_dcim_power_feeds_list.go index 68ac60861..746f7466b 100644 --- a/netbox/data_netbox_json_dcim_power_feeds_list.go +++ b/netbox/data_netbox_json_dcim_power_feeds_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/dcim" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient 
"github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/dcim" ) func dataNetboxJSONDcimPowerFeedsList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONDcimPowerFeedsListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the dcim_power_feeds_list Netbox endpoint.", + Read: dataNetboxJSONDcimPowerFeedsListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONDcimPowerFeedsListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := dcim.NewDcimPowerFeedsListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := dcim.NewDcimPowerFeedsListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Dcim.DcimPowerFeedsList(params, nil) - if err != nil { - return err - } + list, err := client.Dcim.DcimPowerFeedsList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONDcimPowerFeedsList") + d.Set("json", string(j)) + d.SetId("NetboxJSONDcimPowerFeedsList") - return nil + return nil } diff --git a/netbox/data_netbox_json_dcim_power_outlet_templates_list.go b/netbox/data_netbox_json_dcim_power_outlet_templates_list.go index 64b05cc97..b837b402a 100644 --- 
a/netbox/data_netbox_json_dcim_power_outlet_templates_list.go +++ b/netbox/data_netbox_json_dcim_power_outlet_templates_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/dcim" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/dcim" ) func dataNetboxJSONDcimPowerOutletTemplatesList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONDcimPowerOutletTemplatesListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the dcim_power_outlet_templates_list Netbox endpoint.", + Read: dataNetboxJSONDcimPowerOutletTemplatesListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. 
If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONDcimPowerOutletTemplatesListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := dcim.NewDcimPowerOutletTemplatesListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := dcim.NewDcimPowerOutletTemplatesListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Dcim.DcimPowerOutletTemplatesList(params, nil) - if err != nil { - return err - } + list, err := client.Dcim.DcimPowerOutletTemplatesList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONDcimPowerOutletTemplatesList") + d.Set("json", string(j)) + d.SetId("NetboxJSONDcimPowerOutletTemplatesList") - return nil + return nil } diff --git a/netbox/data_netbox_json_dcim_power_outlets_list.go b/netbox/data_netbox_json_dcim_power_outlets_list.go index 4dd6eff15..d1de7e35f 100644 --- a/netbox/data_netbox_json_dcim_power_outlets_list.go +++ b/netbox/data_netbox_json_dcim_power_outlets_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/dcim" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/dcim" ) func dataNetboxJSONDcimPowerOutletsList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONDcimPowerOutletsListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: 
schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the dcim_power_outlets_list Netbox endpoint.", + Read: dataNetboxJSONDcimPowerOutletsListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONDcimPowerOutletsListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := dcim.NewDcimPowerOutletsListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := dcim.NewDcimPowerOutletsListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Dcim.DcimPowerOutletsList(params, nil) - if err != nil { - return err - } + list, err := client.Dcim.DcimPowerOutletsList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONDcimPowerOutletsList") + d.Set("json", string(j)) + d.SetId("NetboxJSONDcimPowerOutletsList") - return nil + return nil } diff --git a/netbox/data_netbox_json_dcim_power_panels_list.go b/netbox/data_netbox_json_dcim_power_panels_list.go index 31b2d3dc4..4b5b34424 100644 --- a/netbox/data_netbox_json_dcim_power_panels_list.go +++ b/netbox/data_netbox_json_dcim_power_panels_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - 
"github.com/smutel/go-netbox/netbox/client/dcim" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/dcim" ) func dataNetboxJSONDcimPowerPanelsList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONDcimPowerPanelsListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the dcim_power_panels_list Netbox endpoint.", + Read: dataNetboxJSONDcimPowerPanelsListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONDcimPowerPanelsListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := dcim.NewDcimPowerPanelsListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := dcim.NewDcimPowerPanelsListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Dcim.DcimPowerPanelsList(params, nil) - if err != nil { - return err - } + list, err := client.Dcim.DcimPowerPanelsList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONDcimPowerPanelsList") + d.Set("json", string(j)) + d.SetId("NetboxJSONDcimPowerPanelsList") - return nil + return nil } diff --git a/netbox/data_netbox_json_dcim_power_port_templates_list.go 
b/netbox/data_netbox_json_dcim_power_port_templates_list.go index 2f2dd66de..02cb635a6 100644 --- a/netbox/data_netbox_json_dcim_power_port_templates_list.go +++ b/netbox/data_netbox_json_dcim_power_port_templates_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/dcim" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/dcim" ) func dataNetboxJSONDcimPowerPortTemplatesList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONDcimPowerPortTemplatesListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the dcim_power_port_templates_list Netbox endpoint.", + Read: dataNetboxJSONDcimPowerPortTemplatesListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. 
If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONDcimPowerPortTemplatesListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := dcim.NewDcimPowerPortTemplatesListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := dcim.NewDcimPowerPortTemplatesListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Dcim.DcimPowerPortTemplatesList(params, nil) - if err != nil { - return err - } + list, err := client.Dcim.DcimPowerPortTemplatesList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONDcimPowerPortTemplatesList") + d.Set("json", string(j)) + d.SetId("NetboxJSONDcimPowerPortTemplatesList") - return nil + return nil } diff --git a/netbox/data_netbox_json_dcim_power_ports_list.go b/netbox/data_netbox_json_dcim_power_ports_list.go index 42c871181..9b1b1da0d 100644 --- a/netbox/data_netbox_json_dcim_power_ports_list.go +++ b/netbox/data_netbox_json_dcim_power_ports_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/dcim" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/dcim" ) func dataNetboxJSONDcimPowerPortsList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONDcimPowerPortsListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: 
true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the dcim_power_ports_list Netbox endpoint.", + Read: dataNetboxJSONDcimPowerPortsListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONDcimPowerPortsListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := dcim.NewDcimPowerPortsListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := dcim.NewDcimPowerPortsListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Dcim.DcimPowerPortsList(params, nil) - if err != nil { - return err - } + list, err := client.Dcim.DcimPowerPortsList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONDcimPowerPortsList") + d.Set("json", string(j)) + d.SetId("NetboxJSONDcimPowerPortsList") - return nil + return nil } diff --git a/netbox/data_netbox_json_dcim_rack_reservations_list.go b/netbox/data_netbox_json_dcim_rack_reservations_list.go index b247f20cc..5c3a49040 100644 --- a/netbox/data_netbox_json_dcim_rack_reservations_list.go +++ b/netbox/data_netbox_json_dcim_rack_reservations_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/dcim" + 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/dcim" ) func dataNetboxJSONDcimRackReservationsList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONDcimRackReservationsListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the dcim_rack_reservations_list Netbox endpoint.", + Read: dataNetboxJSONDcimRackReservationsListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONDcimRackReservationsListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := dcim.NewDcimRackReservationsListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := dcim.NewDcimRackReservationsListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Dcim.DcimRackReservationsList(params, nil) - if err != nil { - return err - } + list, err := client.Dcim.DcimRackReservationsList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONDcimRackReservationsList") + d.Set("json", string(j)) + d.SetId("NetboxJSONDcimRackReservationsList") - return nil + return nil } diff --git a/netbox/data_netbox_json_dcim_rack_roles_list.go 
b/netbox/data_netbox_json_dcim_rack_roles_list.go index 3f65146ec..4fa472831 100644 --- a/netbox/data_netbox_json_dcim_rack_roles_list.go +++ b/netbox/data_netbox_json_dcim_rack_roles_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/dcim" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/dcim" ) func dataNetboxJSONDcimRackRolesList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONDcimRackRolesListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the dcim_rack_roles_list Netbox endpoint.", + Read: dataNetboxJSONDcimRackRolesListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. 
If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONDcimRackRolesListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := dcim.NewDcimRackRolesListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := dcim.NewDcimRackRolesListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Dcim.DcimRackRolesList(params, nil) - if err != nil { - return err - } + list, err := client.Dcim.DcimRackRolesList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONDcimRackRolesList") + d.Set("json", string(j)) + d.SetId("NetboxJSONDcimRackRolesList") - return nil + return nil } diff --git a/netbox/data_netbox_json_dcim_racks_list.go b/netbox/data_netbox_json_dcim_racks_list.go index eb3fd3896..b95770960 100644 --- a/netbox/data_netbox_json_dcim_racks_list.go +++ b/netbox/data_netbox_json_dcim_racks_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/dcim" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/dcim" ) func dataNetboxJSONDcimRacksList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONDcimRacksListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } 
+ return &schema.Resource{ + Description: "Get json output from the dcim_racks_list Netbox endpoint.", + Read: dataNetboxJSONDcimRacksListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONDcimRacksListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := dcim.NewDcimRacksListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := dcim.NewDcimRacksListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Dcim.DcimRacksList(params, nil) - if err != nil { - return err - } + list, err := client.Dcim.DcimRacksList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONDcimRacksList") + d.Set("json", string(j)) + d.SetId("NetboxJSONDcimRacksList") - return nil + return nil } diff --git a/netbox/data_netbox_json_dcim_rear_port_templates_list.go b/netbox/data_netbox_json_dcim_rear_port_templates_list.go index 3d393b742..6d8627003 100644 --- a/netbox/data_netbox_json_dcim_rear_port_templates_list.go +++ b/netbox/data_netbox_json_dcim_rear_port_templates_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/dcim" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + 
"github.com/smutel/go-netbox/netbox/client/dcim" ) func dataNetboxJSONDcimRearPortTemplatesList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONDcimRearPortTemplatesListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the dcim_rear_port_templates_list Netbox endpoint.", + Read: dataNetboxJSONDcimRearPortTemplatesListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONDcimRearPortTemplatesListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := dcim.NewDcimRearPortTemplatesListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := dcim.NewDcimRearPortTemplatesListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Dcim.DcimRearPortTemplatesList(params, nil) - if err != nil { - return err - } + list, err := client.Dcim.DcimRearPortTemplatesList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONDcimRearPortTemplatesList") + d.Set("json", string(j)) + d.SetId("NetboxJSONDcimRearPortTemplatesList") - return nil + return nil } diff --git a/netbox/data_netbox_json_dcim_rear_ports_list.go b/netbox/data_netbox_json_dcim_rear_ports_list.go index a28943b0e..a1602954c 100644 --- a/netbox/data_netbox_json_dcim_rear_ports_list.go 
+++ b/netbox/data_netbox_json_dcim_rear_ports_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/dcim" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/dcim" ) func dataNetboxJSONDcimRearPortsList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONDcimRearPortsListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the dcim_rear_ports_list Netbox endpoint.", + Read: dataNetboxJSONDcimRearPortsListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. 
If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONDcimRearPortsListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := dcim.NewDcimRearPortsListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := dcim.NewDcimRearPortsListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Dcim.DcimRearPortsList(params, nil) - if err != nil { - return err - } + list, err := client.Dcim.DcimRearPortsList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONDcimRearPortsList") + d.Set("json", string(j)) + d.SetId("NetboxJSONDcimRearPortsList") - return nil + return nil } diff --git a/netbox/data_netbox_json_dcim_regions_list.go b/netbox/data_netbox_json_dcim_regions_list.go index 99082f898..9e88273e6 100644 --- a/netbox/data_netbox_json_dcim_regions_list.go +++ b/netbox/data_netbox_json_dcim_regions_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/dcim" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/dcim" ) func dataNetboxJSONDcimRegionsList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONDcimRegionsListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - 
}, - }, - } + return &schema.Resource{ + Description: "Get json output from the dcim_regions_list Netbox endpoint.", + Read: dataNetboxJSONDcimRegionsListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONDcimRegionsListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := dcim.NewDcimRegionsListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := dcim.NewDcimRegionsListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Dcim.DcimRegionsList(params, nil) - if err != nil { - return err - } + list, err := client.Dcim.DcimRegionsList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONDcimRegionsList") + d.Set("json", string(j)) + d.SetId("NetboxJSONDcimRegionsList") - return nil + return nil } diff --git a/netbox/data_netbox_json_dcim_site_groups_list.go b/netbox/data_netbox_json_dcim_site_groups_list.go index 795d1e12a..fe42fd863 100644 --- a/netbox/data_netbox_json_dcim_site_groups_list.go +++ b/netbox/data_netbox_json_dcim_site_groups_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/dcim" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + 
"github.com/smutel/go-netbox/netbox/client/dcim" ) func dataNetboxJSONDcimSiteGroupsList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONDcimSiteGroupsListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the dcim_site_groups_list Netbox endpoint.", + Read: dataNetboxJSONDcimSiteGroupsListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONDcimSiteGroupsListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := dcim.NewDcimSiteGroupsListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := dcim.NewDcimSiteGroupsListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Dcim.DcimSiteGroupsList(params, nil) - if err != nil { - return err - } + list, err := client.Dcim.DcimSiteGroupsList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONDcimSiteGroupsList") + d.Set("json", string(j)) + d.SetId("NetboxJSONDcimSiteGroupsList") - return nil + return nil } diff --git a/netbox/data_netbox_json_dcim_sites_list.go b/netbox/data_netbox_json_dcim_sites_list.go index 7ca3d82d4..af06b9192 100644 --- a/netbox/data_netbox_json_dcim_sites_list.go +++ b/netbox/data_netbox_json_dcim_sites_list.go @@ -1,47 +1,50 @@ package netbox import ( - 
"encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/dcim" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/dcim" ) func dataNetboxJSONDcimSitesList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONDcimSitesListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the dcim_sites_list Netbox endpoint.", + Read: dataNetboxJSONDcimSitesListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONDcimSitesListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := dcim.NewDcimSitesListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := dcim.NewDcimSitesListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Dcim.DcimSitesList(params, nil) - if err != nil { - return err - } + list, err := client.Dcim.DcimSitesList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONDcimSitesList") + d.Set("json", string(j)) + d.SetId("NetboxJSONDcimSitesList") - return nil + return nil } 
diff --git a/netbox/data_netbox_json_dcim_virtual_chassis_list.go b/netbox/data_netbox_json_dcim_virtual_chassis_list.go index 62f9fd567..21930d200 100644 --- a/netbox/data_netbox_json_dcim_virtual_chassis_list.go +++ b/netbox/data_netbox_json_dcim_virtual_chassis_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/dcim" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/dcim" ) func dataNetboxJSONDcimVirtualChassisList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONDcimVirtualChassisListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the dcim_virtual_chassis_list Netbox endpoint.", + Read: dataNetboxJSONDcimVirtualChassisListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. 
If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONDcimVirtualChassisListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := dcim.NewDcimVirtualChassisListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := dcim.NewDcimVirtualChassisListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Dcim.DcimVirtualChassisList(params, nil) - if err != nil { - return err - } + list, err := client.Dcim.DcimVirtualChassisList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONDcimVirtualChassisList") + d.Set("json", string(j)) + d.SetId("NetboxJSONDcimVirtualChassisList") - return nil + return nil } diff --git a/netbox/data_netbox_json_extras_config_contexts_list.go b/netbox/data_netbox_json_extras_config_contexts_list.go index 529b6a50e..b45104968 100644 --- a/netbox/data_netbox_json_extras_config_contexts_list.go +++ b/netbox/data_netbox_json_extras_config_contexts_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/extras" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/extras" ) func dataNetboxJSONExtrasConfigContextsList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONExtrasConfigContextsListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, 
- Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the extras_config_contexts_list Netbox endpoint.", + Read: dataNetboxJSONExtrasConfigContextsListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONExtrasConfigContextsListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := extras.NewExtrasConfigContextsListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := extras.NewExtrasConfigContextsListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Extras.ExtrasConfigContextsList(params, nil) - if err != nil { - return err - } + list, err := client.Extras.ExtrasConfigContextsList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONExtrasConfigContextsList") + d.Set("json", string(j)) + d.SetId("NetboxJSONExtrasConfigContextsList") - return nil + return nil } diff --git a/netbox/data_netbox_json_extras_content_types_list.go b/netbox/data_netbox_json_extras_content_types_list.go index 231fa2a84..892044bba 100644 --- a/netbox/data_netbox_json_extras_content_types_list.go +++ b/netbox/data_netbox_json_extras_content_types_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" 
- "github.com/smutel/go-netbox/netbox/client/extras" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/extras" ) func dataNetboxJSONExtrasContentTypesList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONExtrasContentTypesListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the extras_content_types_list Netbox endpoint.", + Read: dataNetboxJSONExtrasContentTypesListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONExtrasContentTypesListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := extras.NewExtrasContentTypesListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := extras.NewExtrasContentTypesListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Extras.ExtrasContentTypesList(params, nil) - if err != nil { - return err - } + list, err := client.Extras.ExtrasContentTypesList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONExtrasContentTypesList") + d.Set("json", string(j)) + d.SetId("NetboxJSONExtrasContentTypesList") - return nil + return nil } diff --git 
a/netbox/data_netbox_json_extras_custom_fields_list.go b/netbox/data_netbox_json_extras_custom_fields_list.go index d4ebdf054..9d241c1cc 100644 --- a/netbox/data_netbox_json_extras_custom_fields_list.go +++ b/netbox/data_netbox_json_extras_custom_fields_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/extras" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/extras" ) func dataNetboxJSONExtrasCustomFieldsList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONExtrasCustomFieldsListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the extras_custom_fields_list Netbox endpoint.", + Read: dataNetboxJSONExtrasCustomFieldsListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. 
If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONExtrasCustomFieldsListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := extras.NewExtrasCustomFieldsListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := extras.NewExtrasCustomFieldsListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Extras.ExtrasCustomFieldsList(params, nil) - if err != nil { - return err - } + list, err := client.Extras.ExtrasCustomFieldsList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONExtrasCustomFieldsList") + d.Set("json", string(j)) + d.SetId("NetboxJSONExtrasCustomFieldsList") - return nil + return nil } diff --git a/netbox/data_netbox_json_extras_custom_links_list.go b/netbox/data_netbox_json_extras_custom_links_list.go index ba22bbfc0..f6f54ad35 100644 --- a/netbox/data_netbox_json_extras_custom_links_list.go +++ b/netbox/data_netbox_json_extras_custom_links_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/extras" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/extras" ) func dataNetboxJSONExtrasCustomLinksList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONExtrasCustomLinksListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - 
Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the extras_custom_links_list Netbox endpoint.", + Read: dataNetboxJSONExtrasCustomLinksListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONExtrasCustomLinksListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := extras.NewExtrasCustomLinksListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := extras.NewExtrasCustomLinksListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Extras.ExtrasCustomLinksList(params, nil) - if err != nil { - return err - } + list, err := client.Extras.ExtrasCustomLinksList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONExtrasCustomLinksList") + d.Set("json", string(j)) + d.SetId("NetboxJSONExtrasCustomLinksList") - return nil + return nil } diff --git a/netbox/data_netbox_json_extras_export_templates_list.go b/netbox/data_netbox_json_extras_export_templates_list.go index 059809137..cecc5a3ae 100644 --- a/netbox/data_netbox_json_extras_export_templates_list.go +++ b/netbox/data_netbox_json_extras_export_templates_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - 
"github.com/smutel/go-netbox/netbox/client/extras" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/extras" ) func dataNetboxJSONExtrasExportTemplatesList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONExtrasExportTemplatesListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the extras_export_templates_list Netbox endpoint.", + Read: dataNetboxJSONExtrasExportTemplatesListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONExtrasExportTemplatesListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := extras.NewExtrasExportTemplatesListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := extras.NewExtrasExportTemplatesListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Extras.ExtrasExportTemplatesList(params, nil) - if err != nil { - return err - } + list, err := client.Extras.ExtrasExportTemplatesList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONExtrasExportTemplatesList") + d.Set("json", string(j)) + d.SetId("NetboxJSONExtrasExportTemplatesList") - return nil + return nil } diff --git 
a/netbox/data_netbox_json_extras_image_attachments_list.go b/netbox/data_netbox_json_extras_image_attachments_list.go index 2d495e771..08460161d 100644 --- a/netbox/data_netbox_json_extras_image_attachments_list.go +++ b/netbox/data_netbox_json_extras_image_attachments_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/extras" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/extras" ) func dataNetboxJSONExtrasImageAttachmentsList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONExtrasImageAttachmentsListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the extras_image_attachments_list Netbox endpoint.", + Read: dataNetboxJSONExtrasImageAttachmentsListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. 
If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONExtrasImageAttachmentsListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := extras.NewExtrasImageAttachmentsListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := extras.NewExtrasImageAttachmentsListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Extras.ExtrasImageAttachmentsList(params, nil) - if err != nil { - return err - } + list, err := client.Extras.ExtrasImageAttachmentsList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONExtrasImageAttachmentsList") + d.Set("json", string(j)) + d.SetId("NetboxJSONExtrasImageAttachmentsList") - return nil + return nil } diff --git a/netbox/data_netbox_json_extras_job_results_list.go b/netbox/data_netbox_json_extras_job_results_list.go index 5b2df246a..521d24994 100644 --- a/netbox/data_netbox_json_extras_job_results_list.go +++ b/netbox/data_netbox_json_extras_job_results_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/extras" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/extras" ) func dataNetboxJSONExtrasJobResultsList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONExtrasJobResultsListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: 
schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the extras_job_results_list Netbox endpoint.", + Read: dataNetboxJSONExtrasJobResultsListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONExtrasJobResultsListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := extras.NewExtrasJobResultsListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := extras.NewExtrasJobResultsListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Extras.ExtrasJobResultsList(params, nil) - if err != nil { - return err - } + list, err := client.Extras.ExtrasJobResultsList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONExtrasJobResultsList") + d.Set("json", string(j)) + d.SetId("NetboxJSONExtrasJobResultsList") - return nil + return nil } diff --git a/netbox/data_netbox_json_extras_journal_entries_list.go b/netbox/data_netbox_json_extras_journal_entries_list.go index f60533857..ddf67f428 100644 --- a/netbox/data_netbox_json_extras_journal_entries_list.go +++ b/netbox/data_netbox_json_extras_journal_entries_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - 
"github.com/smutel/go-netbox/netbox/client/extras" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/extras" ) func dataNetboxJSONExtrasJournalEntriesList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONExtrasJournalEntriesListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the extras_journal_entries_list Netbox endpoint.", + Read: dataNetboxJSONExtrasJournalEntriesListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONExtrasJournalEntriesListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := extras.NewExtrasJournalEntriesListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := extras.NewExtrasJournalEntriesListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Extras.ExtrasJournalEntriesList(params, nil) - if err != nil { - return err - } + list, err := client.Extras.ExtrasJournalEntriesList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONExtrasJournalEntriesList") + d.Set("json", string(j)) + d.SetId("NetboxJSONExtrasJournalEntriesList") - return nil + return nil } diff --git 
a/netbox/data_netbox_json_extras_object_changes_list.go b/netbox/data_netbox_json_extras_object_changes_list.go index bc2896e78..f4d780818 100644 --- a/netbox/data_netbox_json_extras_object_changes_list.go +++ b/netbox/data_netbox_json_extras_object_changes_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/extras" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/extras" ) func dataNetboxJSONExtrasObjectChangesList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONExtrasObjectChangesListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the extras_object_changes_list Netbox endpoint.", + Read: dataNetboxJSONExtrasObjectChangesListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. 
If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONExtrasObjectChangesListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := extras.NewExtrasObjectChangesListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := extras.NewExtrasObjectChangesListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Extras.ExtrasObjectChangesList(params, nil) - if err != nil { - return err - } + list, err := client.Extras.ExtrasObjectChangesList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONExtrasObjectChangesList") + d.Set("json", string(j)) + d.SetId("NetboxJSONExtrasObjectChangesList") - return nil + return nil } diff --git a/netbox/data_netbox_json_extras_tags_list.go b/netbox/data_netbox_json_extras_tags_list.go index b9622c86b..e7ba7c544 100644 --- a/netbox/data_netbox_json_extras_tags_list.go +++ b/netbox/data_netbox_json_extras_tags_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/extras" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/extras" ) func dataNetboxJSONExtrasTagsList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONExtrasTagsListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { 
- Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the extras_tags_list Netbox endpoint.", + Read: dataNetboxJSONExtrasTagsListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONExtrasTagsListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := extras.NewExtrasTagsListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := extras.NewExtrasTagsListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Extras.ExtrasTagsList(params, nil) - if err != nil { - return err - } + list, err := client.Extras.ExtrasTagsList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONExtrasTagsList") + d.Set("json", string(j)) + d.SetId("NetboxJSONExtrasTagsList") - return nil + return nil } diff --git a/netbox/data_netbox_json_extras_webhooks_list.go b/netbox/data_netbox_json_extras_webhooks_list.go index 548f4db53..9e55a22a3 100644 --- a/netbox/data_netbox_json_extras_webhooks_list.go +++ b/netbox/data_netbox_json_extras_webhooks_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/extras" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient 
"github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/extras" ) func dataNetboxJSONExtrasWebhooksList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONExtrasWebhooksListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the extras_webhooks_list Netbox endpoint.", + Read: dataNetboxJSONExtrasWebhooksListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONExtrasWebhooksListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := extras.NewExtrasWebhooksListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := extras.NewExtrasWebhooksListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Extras.ExtrasWebhooksList(params, nil) - if err != nil { - return err - } + list, err := client.Extras.ExtrasWebhooksList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONExtrasWebhooksList") + d.Set("json", string(j)) + d.SetId("NetboxJSONExtrasWebhooksList") - return nil + return nil } diff --git a/netbox/data_netbox_json_ipam_aggregates_list.go b/netbox/data_netbox_json_ipam_aggregates_list.go index cd4eee8bc..124af93a2 100644 --- a/netbox/data_netbox_json_ipam_aggregates_list.go +++ 
b/netbox/data_netbox_json_ipam_aggregates_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/ipam" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/ipam" ) func dataNetboxJSONIpamAggregatesList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONIpamAggregatesListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the ipam_aggregates_list Netbox endpoint.", + Read: dataNetboxJSONIpamAggregatesListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. 
If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONIpamAggregatesListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := ipam.NewIpamAggregatesListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := ipam.NewIpamAggregatesListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Ipam.IpamAggregatesList(params, nil) - if err != nil { - return err - } + list, err := client.Ipam.IpamAggregatesList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONIpamAggregatesList") + d.Set("json", string(j)) + d.SetId("NetboxJSONIpamAggregatesList") - return nil + return nil } diff --git a/netbox/data_netbox_json_ipam_asns_list.go b/netbox/data_netbox_json_ipam_asns_list.go index 1143296f8..7c494817f 100644 --- a/netbox/data_netbox_json_ipam_asns_list.go +++ b/netbox/data_netbox_json_ipam_asns_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/ipam" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/ipam" ) func dataNetboxJSONIpamAsnsList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONIpamAsnsListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } 
+ return &schema.Resource{ + Description: "Get json output from the ipam_asns_list Netbox endpoint.", + Read: dataNetboxJSONIpamAsnsListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONIpamAsnsListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := ipam.NewIpamAsnsListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := ipam.NewIpamAsnsListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Ipam.IpamAsnsList(params, nil) - if err != nil { - return err - } + list, err := client.Ipam.IpamAsnsList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONIpamAsnsList") + d.Set("json", string(j)) + d.SetId("NetboxJSONIpamAsnsList") - return nil + return nil } diff --git a/netbox/data_netbox_json_ipam_fhrp_group_assignments_list.go b/netbox/data_netbox_json_ipam_fhrp_group_assignments_list.go index a52fba6a4..5803addb5 100644 --- a/netbox/data_netbox_json_ipam_fhrp_group_assignments_list.go +++ b/netbox/data_netbox_json_ipam_fhrp_group_assignments_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/ipam" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + 
"github.com/smutel/go-netbox/netbox/client/ipam" ) func dataNetboxJSONIpamFhrpGroupAssignmentsList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONIpamFhrpGroupAssignmentsListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the ipam_fhrp_group_assignments_list Netbox endpoint.", + Read: dataNetboxJSONIpamFhrpGroupAssignmentsListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONIpamFhrpGroupAssignmentsListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := ipam.NewIpamFhrpGroupAssignmentsListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := ipam.NewIpamFhrpGroupAssignmentsListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Ipam.IpamFhrpGroupAssignmentsList(params, nil) - if err != nil { - return err - } + list, err := client.Ipam.IpamFhrpGroupAssignmentsList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONIpamFhrpGroupAssignmentsList") + d.Set("json", string(j)) + d.SetId("NetboxJSONIpamFhrpGroupAssignmentsList") - return nil + return nil } diff --git a/netbox/data_netbox_json_ipam_fhrp_groups_list.go b/netbox/data_netbox_json_ipam_fhrp_groups_list.go index b62362aa2..e0d0a532f 100644 --- 
a/netbox/data_netbox_json_ipam_fhrp_groups_list.go +++ b/netbox/data_netbox_json_ipam_fhrp_groups_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/ipam" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/ipam" ) func dataNetboxJSONIpamFhrpGroupsList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONIpamFhrpGroupsListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the ipam_fhrp_groups_list Netbox endpoint.", + Read: dataNetboxJSONIpamFhrpGroupsListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. 
If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONIpamFhrpGroupsListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := ipam.NewIpamFhrpGroupsListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := ipam.NewIpamFhrpGroupsListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Ipam.IpamFhrpGroupsList(params, nil) - if err != nil { - return err - } + list, err := client.Ipam.IpamFhrpGroupsList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONIpamFhrpGroupsList") + d.Set("json", string(j)) + d.SetId("NetboxJSONIpamFhrpGroupsList") - return nil + return nil } diff --git a/netbox/data_netbox_json_ipam_ip_addresses_list.go b/netbox/data_netbox_json_ipam_ip_addresses_list.go index b5fd42cde..ce5aa380d 100644 --- a/netbox/data_netbox_json_ipam_ip_addresses_list.go +++ b/netbox/data_netbox_json_ipam_ip_addresses_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/ipam" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/ipam" ) func dataNetboxJSONIpamIPAddressesList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONIpamIPAddressesListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: 
schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the ipam_ip_addresses_list Netbox endpoint.", + Read: dataNetboxJSONIpamIPAddressesListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONIpamIPAddressesListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := ipam.NewIpamIPAddressesListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := ipam.NewIpamIPAddressesListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Ipam.IpamIPAddressesList(params, nil) - if err != nil { - return err - } + list, err := client.Ipam.IpamIPAddressesList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONIpamIPAddressesList") + d.Set("json", string(j)) + d.SetId("NetboxJSONIpamIPAddressesList") - return nil + return nil } diff --git a/netbox/data_netbox_json_ipam_ip_ranges_list.go b/netbox/data_netbox_json_ipam_ip_ranges_list.go index feda16163..8298f2cb5 100644 --- a/netbox/data_netbox_json_ipam_ip_ranges_list.go +++ b/netbox/data_netbox_json_ipam_ip_ranges_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/ipam" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient 
"github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/ipam" ) func dataNetboxJSONIpamIPRangesList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONIpamIPRangesListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the ipam_ip_ranges_list Netbox endpoint.", + Read: dataNetboxJSONIpamIPRangesListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONIpamIPRangesListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := ipam.NewIpamIPRangesListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := ipam.NewIpamIPRangesListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Ipam.IpamIPRangesList(params, nil) - if err != nil { - return err - } + list, err := client.Ipam.IpamIPRangesList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONIpamIPRangesList") + d.Set("json", string(j)) + d.SetId("NetboxJSONIpamIPRangesList") - return nil + return nil } diff --git a/netbox/data_netbox_json_ipam_prefixes_list.go b/netbox/data_netbox_json_ipam_prefixes_list.go index ae1a32d37..fc66ba0e8 100644 --- a/netbox/data_netbox_json_ipam_prefixes_list.go +++ b/netbox/data_netbox_json_ipam_prefixes_list.go @@ 
-1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/ipam" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/ipam" ) func dataNetboxJSONIpamPrefixesList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONIpamPrefixesListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the ipam_prefixes_list Netbox endpoint.", + Read: dataNetboxJSONIpamPrefixesListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. 
If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONIpamPrefixesListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := ipam.NewIpamPrefixesListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := ipam.NewIpamPrefixesListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Ipam.IpamPrefixesList(params, nil) - if err != nil { - return err - } + list, err := client.Ipam.IpamPrefixesList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONIpamPrefixesList") + d.Set("json", string(j)) + d.SetId("NetboxJSONIpamPrefixesList") - return nil + return nil } diff --git a/netbox/data_netbox_json_ipam_rirs_list.go b/netbox/data_netbox_json_ipam_rirs_list.go index 904c23256..8c38bd540 100644 --- a/netbox/data_netbox_json_ipam_rirs_list.go +++ b/netbox/data_netbox_json_ipam_rirs_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/ipam" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/ipam" ) func dataNetboxJSONIpamRirsList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONIpamRirsListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return 
&schema.Resource{ + Description: "Get json output from the ipam_rirs_list Netbox endpoint.", + Read: dataNetboxJSONIpamRirsListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONIpamRirsListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := ipam.NewIpamRirsListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := ipam.NewIpamRirsListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Ipam.IpamRirsList(params, nil) - if err != nil { - return err - } + list, err := client.Ipam.IpamRirsList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONIpamRirsList") + d.Set("json", string(j)) + d.SetId("NetboxJSONIpamRirsList") - return nil + return nil } diff --git a/netbox/data_netbox_json_ipam_roles_list.go b/netbox/data_netbox_json_ipam_roles_list.go index 176f32e16..753d5bc9d 100644 --- a/netbox/data_netbox_json_ipam_roles_list.go +++ b/netbox/data_netbox_json_ipam_roles_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/ipam" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/ipam" ) func dataNetboxJSONIpamRolesList() 
*schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONIpamRolesListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the ipam_roles_list Netbox endpoint.", + Read: dataNetboxJSONIpamRolesListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONIpamRolesListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := ipam.NewIpamRolesListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := ipam.NewIpamRolesListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Ipam.IpamRolesList(params, nil) - if err != nil { - return err - } + list, err := client.Ipam.IpamRolesList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONIpamRolesList") + d.Set("json", string(j)) + d.SetId("NetboxJSONIpamRolesList") - return nil + return nil } diff --git a/netbox/data_netbox_json_ipam_route_targets_list.go b/netbox/data_netbox_json_ipam_route_targets_list.go index ce3bf7627..721e8e899 100644 --- a/netbox/data_netbox_json_ipam_route_targets_list.go +++ b/netbox/data_netbox_json_ipam_route_targets_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - 
netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/ipam" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/ipam" ) func dataNetboxJSONIpamRouteTargetsList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONIpamRouteTargetsListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the ipam_route_targets_list Netbox endpoint.", + Read: dataNetboxJSONIpamRouteTargetsListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONIpamRouteTargetsListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := ipam.NewIpamRouteTargetsListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := ipam.NewIpamRouteTargetsListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Ipam.IpamRouteTargetsList(params, nil) - if err != nil { - return err - } + list, err := client.Ipam.IpamRouteTargetsList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONIpamRouteTargetsList") + d.Set("json", string(j)) + d.SetId("NetboxJSONIpamRouteTargetsList") - return nil + return nil } diff --git 
a/netbox/data_netbox_json_ipam_services_list.go b/netbox/data_netbox_json_ipam_services_list.go index a920b1741..da485818e 100644 --- a/netbox/data_netbox_json_ipam_services_list.go +++ b/netbox/data_netbox_json_ipam_services_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/ipam" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/ipam" ) func dataNetboxJSONIpamServicesList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONIpamServicesListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the ipam_services_list Netbox endpoint.", + Read: dataNetboxJSONIpamServicesListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. 
If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONIpamServicesListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := ipam.NewIpamServicesListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := ipam.NewIpamServicesListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Ipam.IpamServicesList(params, nil) - if err != nil { - return err - } + list, err := client.Ipam.IpamServicesList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONIpamServicesList") + d.Set("json", string(j)) + d.SetId("NetboxJSONIpamServicesList") - return nil + return nil } diff --git a/netbox/data_netbox_json_ipam_vlan_groups_list.go b/netbox/data_netbox_json_ipam_vlan_groups_list.go index 9ea2e7668..540138c75 100644 --- a/netbox/data_netbox_json_ipam_vlan_groups_list.go +++ b/netbox/data_netbox_json_ipam_vlan_groups_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/ipam" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/ipam" ) func dataNetboxJSONIpamVlanGroupsList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONIpamVlanGroupsListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - 
Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the ipam_vlan_groups_list Netbox endpoint.", + Read: dataNetboxJSONIpamVlanGroupsListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONIpamVlanGroupsListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := ipam.NewIpamVlanGroupsListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := ipam.NewIpamVlanGroupsListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Ipam.IpamVlanGroupsList(params, nil) - if err != nil { - return err - } + list, err := client.Ipam.IpamVlanGroupsList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONIpamVlanGroupsList") + d.Set("json", string(j)) + d.SetId("NetboxJSONIpamVlanGroupsList") - return nil + return nil } diff --git a/netbox/data_netbox_json_ipam_vlans_list.go b/netbox/data_netbox_json_ipam_vlans_list.go index 390fb2691..9ef80c8ff 100644 --- a/netbox/data_netbox_json_ipam_vlans_list.go +++ b/netbox/data_netbox_json_ipam_vlans_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/ipam" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + 
"github.com/smutel/go-netbox/netbox/client/ipam" ) func dataNetboxJSONIpamVlansList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONIpamVlansListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the ipam_vlans_list Netbox endpoint.", + Read: dataNetboxJSONIpamVlansListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONIpamVlansListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := ipam.NewIpamVlansListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := ipam.NewIpamVlansListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Ipam.IpamVlansList(params, nil) - if err != nil { - return err - } + list, err := client.Ipam.IpamVlansList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONIpamVlansList") + d.Set("json", string(j)) + d.SetId("NetboxJSONIpamVlansList") - return nil + return nil } diff --git a/netbox/data_netbox_json_ipam_vrfs_list.go b/netbox/data_netbox_json_ipam_vrfs_list.go index 7d7b70d09..985350eef 100644 --- a/netbox/data_netbox_json_ipam_vrfs_list.go +++ b/netbox/data_netbox_json_ipam_vrfs_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/ipam" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/ipam" ) func dataNetboxJSONIpamVrfsList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONIpamVrfsListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the ipam_vrfs_list Netbox endpoint.", + Read: dataNetboxJSONIpamVrfsListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONIpamVrfsListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := ipam.NewIpamVrfsListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := ipam.NewIpamVrfsListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Ipam.IpamVrfsList(params, nil) - if err != nil { - return err - } + list, err := client.Ipam.IpamVrfsList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONIpamVrfsList") + d.Set("json", string(j)) + d.SetId("NetboxJSONIpamVrfsList") - return nil + return nil } diff --git 
a/netbox/data_netbox_json_tenancy_contact_assignments_list.go b/netbox/data_netbox_json_tenancy_contact_assignments_list.go index 638f843a6..b51fb2973 100644 --- a/netbox/data_netbox_json_tenancy_contact_assignments_list.go +++ b/netbox/data_netbox_json_tenancy_contact_assignments_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/tenancy" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/tenancy" ) func dataNetboxJSONTenancyContactAssignmentsList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONTenancyContactAssignmentsListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the tenancy_contact_assignments_list Netbox endpoint.", + Read: dataNetboxJSONTenancyContactAssignmentsListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. 
If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONTenancyContactAssignmentsListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := tenancy.NewTenancyContactAssignmentsListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := tenancy.NewTenancyContactAssignmentsListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Tenancy.TenancyContactAssignmentsList(params, nil) - if err != nil { - return err - } + list, err := client.Tenancy.TenancyContactAssignmentsList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONTenancyContactAssignmentsList") + d.Set("json", string(j)) + d.SetId("NetboxJSONTenancyContactAssignmentsList") - return nil + return nil } diff --git a/netbox/data_netbox_json_tenancy_contact_groups_list.go b/netbox/data_netbox_json_tenancy_contact_groups_list.go index 3d3c4ded1..c114a6eac 100644 --- a/netbox/data_netbox_json_tenancy_contact_groups_list.go +++ b/netbox/data_netbox_json_tenancy_contact_groups_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/tenancy" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/tenancy" ) func dataNetboxJSONTenancyContactGroupsList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONTenancyContactGroupsListRead, - - Schema: 
map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the tenancy_contact_groups_list Netbox endpoint.", + Read: dataNetboxJSONTenancyContactGroupsListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONTenancyContactGroupsListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := tenancy.NewTenancyContactGroupsListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := tenancy.NewTenancyContactGroupsListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Tenancy.TenancyContactGroupsList(params, nil) - if err != nil { - return err - } + list, err := client.Tenancy.TenancyContactGroupsList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONTenancyContactGroupsList") + d.Set("json", string(j)) + d.SetId("NetboxJSONTenancyContactGroupsList") - return nil + return nil } diff --git a/netbox/data_netbox_json_tenancy_contact_roles_list.go b/netbox/data_netbox_json_tenancy_contact_roles_list.go index 2660891c2..a9c5e2d29 100644 --- a/netbox/data_netbox_json_tenancy_contact_roles_list.go +++ b/netbox/data_netbox_json_tenancy_contact_roles_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/tenancy" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/tenancy" ) func dataNetboxJSONTenancyContactRolesList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONTenancyContactRolesListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the tenancy_contact_roles_list Netbox endpoint.", + Read: dataNetboxJSONTenancyContactRolesListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. 
If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONTenancyContactRolesListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := tenancy.NewTenancyContactRolesListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := tenancy.NewTenancyContactRolesListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Tenancy.TenancyContactRolesList(params, nil) - if err != nil { - return err - } + list, err := client.Tenancy.TenancyContactRolesList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONTenancyContactRolesList") + d.Set("json", string(j)) + d.SetId("NetboxJSONTenancyContactRolesList") - return nil + return nil } diff --git a/netbox/data_netbox_json_tenancy_contacts_list.go b/netbox/data_netbox_json_tenancy_contacts_list.go index 99e5eaeef..021c358f1 100644 --- a/netbox/data_netbox_json_tenancy_contacts_list.go +++ b/netbox/data_netbox_json_tenancy_contacts_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/tenancy" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/tenancy" ) func dataNetboxJSONTenancyContactsList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONTenancyContactsListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: 
true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the tenancy_contacts_list Netbox endpoint.", + Read: dataNetboxJSONTenancyContactsListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONTenancyContactsListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := tenancy.NewTenancyContactsListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := tenancy.NewTenancyContactsListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Tenancy.TenancyContactsList(params, nil) - if err != nil { - return err - } + list, err := client.Tenancy.TenancyContactsList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONTenancyContactsList") + d.Set("json", string(j)) + d.SetId("NetboxJSONTenancyContactsList") - return nil + return nil } diff --git a/netbox/data_netbox_json_tenancy_tenant_groups_list.go b/netbox/data_netbox_json_tenancy_tenant_groups_list.go index c5b7e8987..a8de200bf 100644 --- a/netbox/data_netbox_json_tenancy_tenant_groups_list.go +++ b/netbox/data_netbox_json_tenancy_tenant_groups_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - 
"github.com/smutel/go-netbox/netbox/client/tenancy" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/tenancy" ) func dataNetboxJSONTenancyTenantGroupsList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONTenancyTenantGroupsListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the tenancy_tenant_groups_list Netbox endpoint.", + Read: dataNetboxJSONTenancyTenantGroupsListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONTenancyTenantGroupsListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := tenancy.NewTenancyTenantGroupsListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := tenancy.NewTenancyTenantGroupsListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Tenancy.TenancyTenantGroupsList(params, nil) - if err != nil { - return err - } + list, err := client.Tenancy.TenancyTenantGroupsList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONTenancyTenantGroupsList") + d.Set("json", string(j)) + d.SetId("NetboxJSONTenancyTenantGroupsList") - return nil + return nil } diff --git 
a/netbox/data_netbox_json_tenancy_tenants_list.go b/netbox/data_netbox_json_tenancy_tenants_list.go index 1bd44c126..6b139944b 100644 --- a/netbox/data_netbox_json_tenancy_tenants_list.go +++ b/netbox/data_netbox_json_tenancy_tenants_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/tenancy" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/tenancy" ) func dataNetboxJSONTenancyTenantsList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONTenancyTenantsListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the tenancy_tenants_list Netbox endpoint.", + Read: dataNetboxJSONTenancyTenantsListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. 
If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONTenancyTenantsListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := tenancy.NewTenancyTenantsListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := tenancy.NewTenancyTenantsListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Tenancy.TenancyTenantsList(params, nil) - if err != nil { - return err - } + list, err := client.Tenancy.TenancyTenantsList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONTenancyTenantsList") + d.Set("json", string(j)) + d.SetId("NetboxJSONTenancyTenantsList") - return nil + return nil } diff --git a/netbox/data_netbox_json_users_groups_list.go b/netbox/data_netbox_json_users_groups_list.go index 82be892c3..a7d8c4433 100644 --- a/netbox/data_netbox_json_users_groups_list.go +++ b/netbox/data_netbox_json_users_groups_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/users" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/users" ) func dataNetboxJSONUsersGroupsList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONUsersGroupsListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, 
- Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the users_groups_list Netbox endpoint.", + Read: dataNetboxJSONUsersGroupsListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONUsersGroupsListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := users.NewUsersGroupsListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := users.NewUsersGroupsListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Users.UsersGroupsList(params, nil) - if err != nil { - return err - } + list, err := client.Users.UsersGroupsList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONUsersGroupsList") + d.Set("json", string(j)) + d.SetId("NetboxJSONUsersGroupsList") - return nil + return nil } diff --git a/netbox/data_netbox_json_users_permissions_list.go b/netbox/data_netbox_json_users_permissions_list.go index 01f8dfa64..830e8dc7d 100644 --- a/netbox/data_netbox_json_users_permissions_list.go +++ b/netbox/data_netbox_json_users_permissions_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/users" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" 
+ "github.com/smutel/go-netbox/netbox/client/users" ) func dataNetboxJSONUsersPermissionsList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONUsersPermissionsListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the users_permissions_list Netbox endpoint.", + Read: dataNetboxJSONUsersPermissionsListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONUsersPermissionsListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := users.NewUsersPermissionsListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := users.NewUsersPermissionsListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Users.UsersPermissionsList(params, nil) - if err != nil { - return err - } + list, err := client.Users.UsersPermissionsList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONUsersPermissionsList") + d.Set("json", string(j)) + d.SetId("NetboxJSONUsersPermissionsList") - return nil + return nil } diff --git a/netbox/data_netbox_json_users_tokens_list.go b/netbox/data_netbox_json_users_tokens_list.go index c83b6aec6..1b3f912f5 100644 --- a/netbox/data_netbox_json_users_tokens_list.go +++ b/netbox/data_netbox_json_users_tokens_list.go @@ 
-1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/users" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/users" ) func dataNetboxJSONUsersTokensList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONUsersTokensListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the users_tokens_list Netbox endpoint.", + Read: dataNetboxJSONUsersTokensListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. 
If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONUsersTokensListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := users.NewUsersTokensListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := users.NewUsersTokensListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Users.UsersTokensList(params, nil) - if err != nil { - return err - } + list, err := client.Users.UsersTokensList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONUsersTokensList") + d.Set("json", string(j)) + d.SetId("NetboxJSONUsersTokensList") - return nil + return nil } diff --git a/netbox/data_netbox_json_users_users_list.go b/netbox/data_netbox_json_users_users_list.go index 4ba4c1593..4339d1d39 100644 --- a/netbox/data_netbox_json_users_users_list.go +++ b/netbox/data_netbox_json_users_users_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/users" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/users" ) func dataNetboxJSONUsersUsersList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONUsersUsersListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + 
return &schema.Resource{ + Description: "Get json output from the users_users_list Netbox endpoint.", + Read: dataNetboxJSONUsersUsersListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONUsersUsersListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := users.NewUsersUsersListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := users.NewUsersUsersListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Users.UsersUsersList(params, nil) - if err != nil { - return err - } + list, err := client.Users.UsersUsersList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONUsersUsersList") + d.Set("json", string(j)) + d.SetId("NetboxJSONUsersUsersList") - return nil + return nil } diff --git a/netbox/data_netbox_json_virtualization_cluster_groups_list.go b/netbox/data_netbox_json_virtualization_cluster_groups_list.go index dd59336a9..70789698b 100644 --- a/netbox/data_netbox_json_virtualization_cluster_groups_list.go +++ b/netbox/data_netbox_json_virtualization_cluster_groups_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/virtualization" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient 
"github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/virtualization" ) func dataNetboxJSONVirtualizationClusterGroupsList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONVirtualizationClusterGroupsListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the virtualization_cluster_groups_list Netbox endpoint.", + Read: dataNetboxJSONVirtualizationClusterGroupsListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONVirtualizationClusterGroupsListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := virtualization.NewVirtualizationClusterGroupsListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := virtualization.NewVirtualizationClusterGroupsListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Virtualization.VirtualizationClusterGroupsList(params, nil) - if err != nil { - return err - } + list, err := client.Virtualization.VirtualizationClusterGroupsList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONVirtualizationClusterGroupsList") + d.Set("json", string(j)) + d.SetId("NetboxJSONVirtualizationClusterGroupsList") - return nil + return nil } diff --git 
a/netbox/data_netbox_json_virtualization_cluster_types_list.go b/netbox/data_netbox_json_virtualization_cluster_types_list.go index 7304edcf6..dc56223f8 100644 --- a/netbox/data_netbox_json_virtualization_cluster_types_list.go +++ b/netbox/data_netbox_json_virtualization_cluster_types_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/virtualization" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/virtualization" ) func dataNetboxJSONVirtualizationClusterTypesList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONVirtualizationClusterTypesListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the virtualization_cluster_types_list Netbox endpoint.", + Read: dataNetboxJSONVirtualizationClusterTypesListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. 
If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONVirtualizationClusterTypesListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := virtualization.NewVirtualizationClusterTypesListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := virtualization.NewVirtualizationClusterTypesListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Virtualization.VirtualizationClusterTypesList(params, nil) - if err != nil { - return err - } + list, err := client.Virtualization.VirtualizationClusterTypesList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONVirtualizationClusterTypesList") + d.Set("json", string(j)) + d.SetId("NetboxJSONVirtualizationClusterTypesList") - return nil + return nil } diff --git a/netbox/data_netbox_json_virtualization_clusters_list.go b/netbox/data_netbox_json_virtualization_clusters_list.go index ac044fe2b..65a6b4883 100644 --- a/netbox/data_netbox_json_virtualization_clusters_list.go +++ b/netbox/data_netbox_json_virtualization_clusters_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/virtualization" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/virtualization" ) func dataNetboxJSONVirtualizationClustersList() *schema.Resource { - return &schema.Resource{ - Read: 
dataNetboxJSONVirtualizationClustersListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the virtualization_clusters_list Netbox endpoint.", + Read: dataNetboxJSONVirtualizationClustersListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONVirtualizationClustersListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := virtualization.NewVirtualizationClustersListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := virtualization.NewVirtualizationClustersListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Virtualization.VirtualizationClustersList(params, nil) - if err != nil { - return err - } + list, err := client.Virtualization.VirtualizationClustersList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONVirtualizationClustersList") + d.Set("json", string(j)) + d.SetId("NetboxJSONVirtualizationClustersList") - return nil + return nil } diff --git a/netbox/data_netbox_json_virtualization_interfaces_list.go b/netbox/data_netbox_json_virtualization_interfaces_list.go index 07aa5d61b..ef8f61437 100644 --- a/netbox/data_netbox_json_virtualization_interfaces_list.go +++ b/netbox/data_netbox_json_virtualization_interfaces_list.go @@ -1,47 
+1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/virtualization" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/virtualization" ) func dataNetboxJSONVirtualizationInterfacesList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONVirtualizationInterfacesListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the virtualization_interfaces_list Netbox endpoint.", + Read: dataNetboxJSONVirtualizationInterfacesListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. 
If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONVirtualizationInterfacesListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := virtualization.NewVirtualizationInterfacesListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := virtualization.NewVirtualizationInterfacesListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Virtualization.VirtualizationInterfacesList(params, nil) - if err != nil { - return err - } + list, err := client.Virtualization.VirtualizationInterfacesList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONVirtualizationInterfacesList") + d.Set("json", string(j)) + d.SetId("NetboxJSONVirtualizationInterfacesList") - return nil + return nil } diff --git a/netbox/data_netbox_json_virtualization_virtual_machines_list.go b/netbox/data_netbox_json_virtualization_virtual_machines_list.go index 6c9626e60..fbf02ab7b 100644 --- a/netbox/data_netbox_json_virtualization_virtual_machines_list.go +++ b/netbox/data_netbox_json_virtualization_virtual_machines_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/virtualization" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/virtualization" ) func dataNetboxJSONVirtualizationVirtualMachinesList() *schema.Resource { - return 
&schema.Resource{ - Read: dataNetboxJSONVirtualizationVirtualMachinesListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the virtualization_virtual_machines_list Netbox endpoint.", + Read: dataNetboxJSONVirtualizationVirtualMachinesListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONVirtualizationVirtualMachinesListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := virtualization.NewVirtualizationVirtualMachinesListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := virtualization.NewVirtualizationVirtualMachinesListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Virtualization.VirtualizationVirtualMachinesList(params, nil) - if err != nil { - return err - } + list, err := client.Virtualization.VirtualizationVirtualMachinesList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONVirtualizationVirtualMachinesList") + d.Set("json", string(j)) + d.SetId("NetboxJSONVirtualizationVirtualMachinesList") - return nil + return nil } diff --git a/netbox/data_netbox_json_wireless_wireless_lan_groups_list.go b/netbox/data_netbox_json_wireless_wireless_lan_groups_list.go index 2beded0dc..bc0450fb4 100644 --- 
a/netbox/data_netbox_json_wireless_wireless_lan_groups_list.go +++ b/netbox/data_netbox_json_wireless_wireless_lan_groups_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/wireless" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/wireless" ) func dataNetboxJSONWirelessWirelessLanGroupsList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONWirelessWirelessLanGroupsListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the wireless_wireless_lan_groups_list Netbox endpoint.", + Read: dataNetboxJSONWirelessWirelessLanGroupsListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. 
If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONWirelessWirelessLanGroupsListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := wireless.NewWirelessWirelessLanGroupsListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := wireless.NewWirelessWirelessLanGroupsListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Wireless.WirelessWirelessLanGroupsList(params, nil) - if err != nil { - return err - } + list, err := client.Wireless.WirelessWirelessLanGroupsList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONWirelessWirelessLanGroupsList") + d.Set("json", string(j)) + d.SetId("NetboxJSONWirelessWirelessLanGroupsList") - return nil + return nil } diff --git a/netbox/data_netbox_json_wireless_wireless_lans_list.go b/netbox/data_netbox_json_wireless_wireless_lans_list.go index 7ce4c45ec..8e5f76c86 100644 --- a/netbox/data_netbox_json_wireless_wireless_lans_list.go +++ b/netbox/data_netbox_json_wireless_wireless_lans_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/wireless" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/wireless" ) func dataNetboxJSONWirelessWirelessLansList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONWirelessWirelessLansListRead, - - 
Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the wireless_wireless_lans_list Netbox endpoint.", + Read: dataNetboxJSONWirelessWirelessLansListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONWirelessWirelessLansListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := wireless.NewWirelessWirelessLansListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := wireless.NewWirelessWirelessLansListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Wireless.WirelessWirelessLansList(params, nil) - if err != nil { - return err - } + list, err := client.Wireless.WirelessWirelessLansList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONWirelessWirelessLansList") + d.Set("json", string(j)) + d.SetId("NetboxJSONWirelessWirelessLansList") - return nil + return nil } diff --git a/netbox/data_netbox_json_wireless_wireless_links_list.go b/netbox/data_netbox_json_wireless_wireless_links_list.go index bfa4f0b42..5bb90579f 100644 --- a/netbox/data_netbox_json_wireless_wireless_links_list.go +++ b/netbox/data_netbox_json_wireless_wireless_links_list.go @@ -1,47 +1,50 @@ package netbox import ( - "encoding/json" + "encoding/json" - 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - netboxclient "github.com/smutel/go-netbox/netbox/client" - "github.com/smutel/go-netbox/netbox/client/wireless" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + netboxclient "github.com/smutel/go-netbox/netbox/client" + "github.com/smutel/go-netbox/netbox/client/wireless" ) func dataNetboxJSONWirelessWirelessLinksList() *schema.Resource { - return &schema.Resource{ - Read: dataNetboxJSONWirelessWirelessLinksListRead, - - Schema: map[string]*schema.Schema{ - "limit": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "json": { - Type: schema.TypeString, - Computed: true, - }, - }, - } + return &schema.Resource{ + Description: "Get json output from the wireless_wireless_links_list Netbox endpoint.", + Read: dataNetboxJSONWirelessWirelessLinksListRead, + + Schema: map[string]*schema.Schema{ + "limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The max number of returned results. 
If 0 is specified, all records will be returned.", + }, + "json": { + Type: schema.TypeString, + Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", + }, + }, + } } func dataNetboxJSONWirelessWirelessLinksListRead(d *schema.ResourceData, m interface{}) error { - client := m.(*netboxclient.NetBoxAPI) + client := m.(*netboxclient.NetBoxAPI) - params := wireless.NewWirelessWirelessLinksListParams() - limit := int64(d.Get("limit").(int)) - params.Limit = &limit + params := wireless.NewWirelessWirelessLinksListParams() + limit := int64(d.Get("limit").(int)) + params.Limit = &limit - list, err := client.Wireless.WirelessWirelessLinksList(params, nil) - if err != nil { - return err - } + list, err := client.Wireless.WirelessWirelessLinksList(params, nil) + if err != nil { + return err + } - j, _ := json.Marshal(list.Payload.Results) + j, _ := json.Marshal(list.Payload.Results) - d.Set("json", string(j)) - d.SetId("NetboxJSONWirelessWirelessLinksList") + d.Set("json", string(j)) + d.SetId("NetboxJSONWirelessWirelessLinksList") - return nil + return nil } diff --git a/netbox/provider.go b/netbox/provider.go index 32aecb782..581bafa1a 100644 --- a/netbox/provider.go +++ b/netbox/provider.go @@ -22,31 +22,31 @@ func Provider() *schema.Provider { Type: schema.TypeString, Required: true, DefaultFunc: schema.EnvDefaultFunc("NETBOX_URL", "127.0.0.1:8000"), - Description: "URL to reach netbox application.", + Description: "URL and port to reach netbox application (127.0.0.1:8000 by default).", }, "basepath": { Type: schema.TypeString, Optional: true, DefaultFunc: schema.EnvDefaultFunc("NETBOX_BASEPATH", client.DefaultBasePath), - Description: "URL path to the netbox API.", + Description: "URL base path to the netbox API (/api by default).", }, "token": { Type: schema.TypeString, Required: true, DefaultFunc: schema.EnvDefaultFunc("NETBOX_TOKEN", ""), - Description: "Token used for API operations.", + Description: "Token used for API 
operations (empty by default).", }, "scheme": { Type: schema.TypeString, Optional: true, DefaultFunc: schema.EnvDefaultFunc("NETBOX_SCHEME", "https"), - Description: "Scheme used to reach netbox application.", + Description: "Scheme used to reach netbox application (https by default).", }, "insecure": { Type: schema.TypeBool, Optional: true, DefaultFunc: schema.EnvDefaultFunc("NETBOX_INSECURE", false), - Description: "Skip TLS certificate validation.", + Description: "Skip TLS certificate validation (false by default).", }, }, DataSourcesMap: map[string]*schema.Resource{ diff --git a/netbox/resource_netbox_ipam_aggregate.go b/netbox/resource_netbox_ipam_aggregate.go index a2a000cd0..9255821ce 100644 --- a/netbox/resource_netbox_ipam_aggregate.go +++ b/netbox/resource_netbox_ipam_aggregate.go @@ -15,19 +15,21 @@ import ( func resourceNetboxIpamAggregate() *schema.Resource { return &schema.Resource{ - Create: resourceNetboxIpamAggregateCreate, - Read: resourceNetboxIpamAggregateRead, - Update: resourceNetboxIpamAggregateUpdate, - Delete: resourceNetboxIpamAggregateDelete, - Exists: resourceNetboxIpamAggregateExists, + Description: "Manage an aggregate (ipam module) within Netbox.", + Create: resourceNetboxIpamAggregateCreate, + Read: resourceNetboxIpamAggregateRead, + Update: resourceNetboxIpamAggregateUpdate, + Delete: resourceNetboxIpamAggregateDelete, + Exists: resourceNetboxIpamAggregateExists, Importer: &schema.ResourceImporter{ StateContext: schema.ImportStatePassthroughContext, }, Schema: map[string]*schema.Schema{ "content_type": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "The content type of this aggregate (ipam module).", }, "custom_field": { Type: schema.TypeSet, @@ -35,21 +37,25 @@ func resourceNetboxIpamAggregate() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "name": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + 
Description: "Name of the existing custom field.", }, "type": { Type: schema.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{"text", "integer", "boolean", "date", "url", "selection", "multiple"}, false), + Description: "Type of the existing custom field (text, integer, boolean, url, selection, multiple).", }, "value": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + Description: "Value of the existing custom field.", }, }, }, + Description: "Existing custom fields to associate to this aggregate (ipam module).", }, "date_added": { Type: schema.TypeString, @@ -63,21 +69,25 @@ func resourceNetboxIpamAggregate() *schema.Resource { } return }, + Description: "Date when this aggregate was added. Format *YYYY-MM-DD*.", }, "description": { Type: schema.TypeString, Optional: true, Default: nil, ValidateFunc: validation.StringLenBetween(1, 200), + Description: "The description of this aggregate (ipam module).", }, "prefix": { Type: schema.TypeString, Required: true, ValidateFunc: validation.IsCIDRNetwork(0, 256), + Description: "The network prefix of this aggregate (ipam module).", }, "rir_id": { - Type: schema.TypeInt, - Required: true, + Type: schema.TypeInt, + Required: true, + Description: "The RIR id linked to this aggregate (ipam module).", }, "tag": { Type: schema.TypeSet, @@ -85,15 +95,18 @@ func resourceNetboxIpamAggregate() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "name": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + Description: "Name of the existing tag.", }, "slug": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + Description: "Slug of the existing tag.", }, }, }, + Description: "Existing tag to associate to this aggregate (ipam module).", }, }, } diff --git a/netbox/resource_netbox_ipam_ip_addresses.go b/netbox/resource_netbox_ipam_ip_addresses.go index 
d551ab743..f09b0bf42 100644 --- a/netbox/resource_netbox_ipam_ip_addresses.go +++ b/netbox/resource_netbox_ipam_ip_addresses.go @@ -14,11 +14,12 @@ import ( func resourceNetboxIpamIPAddresses() *schema.Resource { return &schema.Resource{ - Create: resourceNetboxIpamIPAddressesCreate, - Read: resourceNetboxIpamIPAddressesRead, - Update: resourceNetboxIpamIPAddressesUpdate, - Delete: resourceNetboxIpamIPAddressesDelete, - Exists: resourceNetboxIpamIPAddressesExists, + Description: "Manage an IP address (ipam module) within Netbox.", + Create: resourceNetboxIpamIPAddressesCreate, + Read: resourceNetboxIpamIPAddressesRead, + Update: resourceNetboxIpamIPAddressesUpdate, + Delete: resourceNetboxIpamIPAddressesDelete, + Exists: resourceNetboxIpamIPAddressesExists, Importer: &schema.ResourceImporter{ StateContext: schema.ImportStatePassthroughContext, }, @@ -28,10 +29,12 @@ func resourceNetboxIpamIPAddresses() *schema.Resource { Type: schema.TypeString, Required: true, ValidateFunc: validation.IsCIDR, + Description: "The IP address (with mask) used for this IP address (ipam module).", }, "content_type": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "The content type of this IP address (ipam module).", }, "custom_field": { Type: schema.TypeSet, @@ -39,27 +42,32 @@ func resourceNetboxIpamIPAddresses() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "name": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + Description: "Name of the existing custom field.", }, "type": { Type: schema.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{"text", "integer", "boolean", "date", "url", "selection", "multiple"}, false), + Description: "Type of the existing custom field (text, integer, boolean, url, selection, multiple).", }, "value": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + 
Description: "Value of the existing custom field.", }, }, }, + Description: "Existing custom fields to associate to this IP address (ipam module).", }, "description": { Type: schema.TypeString, Optional: true, Default: nil, ValidateFunc: validation.StringLenBetween(1, 100), + Description: "The description of this IP address (ipam module).", }, "dns_name": { Type: schema.TypeString, @@ -68,15 +76,18 @@ func resourceNetboxIpamIPAddresses() *schema.Resource { ValidateFunc: validation.StringMatch( regexp.MustCompile("^[-a-zA-Z0-9_.]{1,255}$"), "Must be like ^[-a-zA-Z0-9_.]{1,255}$"), + Description: "The DNS name of this IP address (ipam module).", }, "nat_inside_id": { - Type: schema.TypeInt, - Optional: true, + Type: schema.TypeInt, + Optional: true, + Description: "The ID of the NAT inside of this IP address (ipam module).", }, "object_id": { - Type: schema.TypeInt, - Optional: true, - ForceNew: true, + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: "The ID of the object where this resource is attached to.", }, "object_type": { Type: schema.TypeString, @@ -84,12 +95,14 @@ func resourceNetboxIpamIPAddresses() *schema.Resource { Default: "", ValidateFunc: validation.StringInSlice([]string{ VMInterfaceType, "dcim.interface"}, false), + Description: "The object type among virtualization.vminterface or dcim.interface (empty by default).", }, "primary_ip4": { - Type: schema.TypeBool, - Optional: true, - Default: false, - ForceNew: true, + Type: schema.TypeBool, + Optional: true, + Default: false, + ForceNew: true, + Description: "Set this resource as primary IPv4 (false by default).", }, "role": { Type: schema.TypeString, @@ -98,6 +111,7 @@ func resourceNetboxIpamIPAddresses() *schema.Resource { ValidateFunc: validation.StringInSlice([]string{"loopback", "secondary", "anycast", "vip", "vrrp", "hsrp", "glbp", "carp"}, false), + Description: "The role among loopback, secondary, anycast, vip, vrrp, hsrp, glbp, carp of this IP address (ipam module).", 
}, "status": { Type: schema.TypeString, @@ -105,6 +119,7 @@ func resourceNetboxIpamIPAddresses() *schema.Resource { Default: "active", ValidateFunc: validation.StringInSlice([]string{"container", "active", "reserved", "deprecated", "dhcp"}, false), + Description: "The status of this IP address (ipam module) among container, active, reserved, deprecated, dhcp (active by default).", }, "tag": { Type: schema.TypeSet, @@ -112,23 +127,28 @@ func resourceNetboxIpamIPAddresses() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "name": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + Description: "Name of the existing tag.", }, "slug": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + Description: "Slug of the existing tag.", }, }, }, + Description: "Existing tag to associate to this IP address (ipam module).", }, "tenant_id": { - Type: schema.TypeInt, - Optional: true, + Type: schema.TypeInt, + Optional: true, + Description: "ID of the tenant where this object is attached.", }, "vrf_id": { - Type: schema.TypeInt, - Optional: true, + Type: schema.TypeInt, + Optional: true, + Description: "ID of the vrf attached to this IP address (ipam module).", }, }, } diff --git a/netbox/resource_netbox_ipam_prefix.go b/netbox/resource_netbox_ipam_prefix.go index 3eb3a64ba..763df53ad 100644 --- a/netbox/resource_netbox_ipam_prefix.go +++ b/netbox/resource_netbox_ipam_prefix.go @@ -13,19 +13,21 @@ import ( func resourceNetboxIpamPrefix() *schema.Resource { return &schema.Resource{ - Create: resourceNetboxIpamPrefixCreate, - Read: resourceNetboxIpamPrefixRead, - Update: resourceNetboxIpamPrefixUpdate, - Delete: resourceNetboxIpamPrefixDelete, - Exists: resourceNetboxIpamPrefixExists, + Description: "Manage a prefix (ipam module) within Netbox.", + Create: resourceNetboxIpamPrefixCreate, + Read: resourceNetboxIpamPrefixRead, + Update: resourceNetboxIpamPrefixUpdate, + Delete: 
resourceNetboxIpamPrefixDelete, + Exists: resourceNetboxIpamPrefixExists, Importer: &schema.ResourceImporter{ StateContext: schema.ImportStatePassthroughContext, }, Schema: map[string]*schema.Schema{ "content_type": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "The content type of this prefix (ipam module).", }, "custom_field": { Type: schema.TypeSet, @@ -33,45 +35,54 @@ func resourceNetboxIpamPrefix() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "name": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + Description: "Name of the existing custom field.", }, "type": { Type: schema.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{"text", "integer", "boolean", "date", "url", "selection", "multiple"}, false), + Description: "Type of the existing custom field (text, integer, boolean, url, selection, multiple).", }, "value": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + Description: "Value of the existing custom field.", }, }, }, + Description: "Existing custom fields to associate to this prefix (ipam module).", }, "description": { Type: schema.TypeString, Optional: true, Default: nil, ValidateFunc: validation.StringLenBetween(1, 100), + Description: "The description of this prefix (ipam module).", }, "is_pool": { - Type: schema.TypeBool, - Optional: true, - Default: nil, + Type: schema.TypeBool, + Optional: true, + Default: nil, + Description: "Define if this object is a pool (false by default).", }, "prefix": { Type: schema.TypeString, Required: true, ValidateFunc: validation.IsCIDRNetwork(0, 256), + Description: "The prefix (IP address/mask) used for this prefix (ipam module).", }, "role_id": { - Type: schema.TypeInt, - Optional: true, + Type: schema.TypeInt, + Optional: true, + Description: "ID of the role attached to this prefix (ipam module).", }, 
"site_id": { - Type: schema.TypeInt, - Optional: true, + Type: schema.TypeInt, + Optional: true, + Description: "ID of the site where this prefix (ipam module) is located.", }, "status": { Type: schema.TypeString, @@ -79,6 +90,7 @@ func resourceNetboxIpamPrefix() *schema.Resource { Default: "active", ValidateFunc: validation.StringInSlice([]string{"container", "active", "reserved", "deprecated"}, false), + Description: "Status among container, active, reserved, deprecated (active by default).", }, "tag": { Type: schema.TypeSet, @@ -86,27 +98,33 @@ func resourceNetboxIpamPrefix() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "name": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + Description: "Name of the existing tag.", }, "slug": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + Description: "Slug of the existing tag.", }, }, }, + Description: "Existing tag to associate to this prefix (ipam module).", }, "tenant_id": { - Type: schema.TypeInt, - Optional: true, + Type: schema.TypeInt, + Optional: true, + Description: "ID of the tenant where this prefix (ipam module) is attached.", }, "vlan_id": { - Type: schema.TypeInt, - Optional: true, + Type: schema.TypeInt, + Optional: true, + Description: "ID of the vlan where this prefix (ipam module) is attached.", }, "vrf_id": { - Type: schema.TypeInt, - Optional: true, + Type: schema.TypeInt, + Optional: true, + Description: "ID of the vrf attached to this prefix (ipam module).", }, }, } diff --git a/netbox/resource_netbox_ipam_service.go b/netbox/resource_netbox_ipam_service.go index 09f3f1e3f..dd478c31d 100644 --- a/netbox/resource_netbox_ipam_service.go +++ b/netbox/resource_netbox_ipam_service.go @@ -13,19 +13,21 @@ import ( func resourceNetboxIpamService() *schema.Resource { return &schema.Resource{ - Create: resourceNetboxIpamServiceCreate, - Read: resourceNetboxIpamServiceRead, - Update: 
resourceNetboxIpamServiceUpdate, - Delete: resourceNetboxIpamServiceDelete, - Exists: resourceNetboxIpamServiceExists, + Description: "Manage a service (ipam module) within Netbox.", + Create: resourceNetboxIpamServiceCreate, + Read: resourceNetboxIpamServiceRead, + Update: resourceNetboxIpamServiceUpdate, + Delete: resourceNetboxIpamServiceDelete, + Exists: resourceNetboxIpamServiceExists, Importer: &schema.ResourceImporter{ StateContext: schema.ImportStatePassthroughContext, }, Schema: map[string]*schema.Schema{ "content_type": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "The content type of this service (ipam module).", }, "custom_field": { Type: schema.TypeSet, @@ -33,32 +35,38 @@ func resourceNetboxIpamService() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "name": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + Description: "Name of the existing custom field.", }, "type": { Type: schema.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{"text", "integer", "boolean", "date", "url", "selection", "multiple"}, false), + Description: "Type of the existing custom field (text, integer, boolean, url, selection, multiple).", }, "value": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + Description: "Value of the existing custom field.", }, }, }, + Description: "Existing custom fields to associate to this service (ipam module).", }, "description": { Type: schema.TypeString, Optional: true, Default: nil, ValidateFunc: validation.StringLenBetween(1, 200), + Description: "The description of this service (ipam module).", }, "device_id": { Type: schema.TypeInt, Optional: true, ExactlyOneOf: []string{"device_id", "virtualmachine_id"}, + Description: "ID of the device linked to this service (ipam module).", }, "ip_addresses_id": { Type: schema.TypeList, @@ -66,11 
+74,13 @@ func resourceNetboxIpamService() *schema.Resource { Elem: &schema.Schema{ Type: schema.TypeInt, }, + Description: "Array of ID of IP addresses attached to this service (ipam module).", }, "name": { Type: schema.TypeString, Required: true, ValidateFunc: validation.StringLenBetween(1, 50), + Description: "The name for this service (ipam module).", }, "ports": { Type: schema.TypeList, @@ -78,11 +88,13 @@ func resourceNetboxIpamService() *schema.Resource { Elem: &schema.Schema{ Type: schema.TypeInt, }, + Description: "Array of ports of this service (ipam module).", }, "protocol": { Type: schema.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{"tcp", "udp"}, false), + Description: "The protocol of this service (ipam module) (tcp or udp).", }, "tag": { Type: schema.TypeSet, @@ -90,19 +102,23 @@ func resourceNetboxIpamService() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "name": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + Description: "Name of the existing tag.", }, "slug": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + Description: "Slug of the existing tag.", }, }, }, + Description: "Existing tag to associate to this service (ipam module).", }, "virtualmachine_id": { - Type: schema.TypeInt, - Optional: true, + Type: schema.TypeInt, + Optional: true, + Description: "ID of the VM linked to this service (ipam module).", }, }, } diff --git a/netbox/resource_netbox_ipam_vlan.go b/netbox/resource_netbox_ipam_vlan.go index 2f1e51e93..81b249915 100644 --- a/netbox/resource_netbox_ipam_vlan.go +++ b/netbox/resource_netbox_ipam_vlan.go @@ -13,19 +13,21 @@ import ( func resourceNetboxIpamVlan() *schema.Resource { return &schema.Resource{ - Create: resourceNetboxIpamVlanCreate, - Read: resourceNetboxIpamVlanRead, - Update: resourceNetboxIpamVlanUpdate, - Delete: resourceNetboxIpamVlanDelete, - Exists: 
resourceNetboxIpamVlanExists, + Description: "Manage a vlan (ipam module) within Netbox.", + Create: resourceNetboxIpamVlanCreate, + Read: resourceNetboxIpamVlanRead, + Update: resourceNetboxIpamVlanUpdate, + Delete: resourceNetboxIpamVlanDelete, + Exists: resourceNetboxIpamVlanExists, Importer: &schema.ResourceImporter{ StateContext: schema.ImportStatePassthroughContext, }, Schema: map[string]*schema.Schema{ "content_type": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "The content type of this vlan (ipam module).", }, "custom_field": { Type: schema.TypeSet, @@ -33,44 +35,53 @@ func resourceNetboxIpamVlan() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "name": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + Description: "Name of the existing custom field.", }, "type": { Type: schema.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{"text", "integer", "boolean", "date", "url", "selection", "multiple"}, false), + Description: "Type of the existing custom field (text, integer, boolean, url, selection, multiple).", }, "value": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + Description: "Value of the existing custom field.", }, }, }, + Description: "Existing custom fields to associate to this vlan (ipam module).", }, "description": { Type: schema.TypeString, Optional: true, Default: nil, ValidateFunc: validation.StringLenBetween(1, 100), + Description: "The description of this vlan (ipam module).", }, "vlan_group_id": { - Type: schema.TypeInt, - Optional: true, + Type: schema.TypeInt, + Optional: true, + Description: "ID of the group where this vlan (ipam module) belongs to.", }, "name": { Type: schema.TypeString, Required: true, ValidateFunc: validation.StringLenBetween(1, 50), + Description: "The name for this vlan (ipam module).", }, "role_id": { - Type: 
schema.TypeInt, - Optional: true, + Type: schema.TypeInt, + Optional: true, + Description: "ID of the role attached to this vlan (ipam module).", }, "site_id": { - Type: schema.TypeInt, - Optional: true, + Type: schema.TypeInt, + Optional: true, + Description: "ID of the site where this vlan (ipam module) is located.", }, "status": { Type: schema.TypeString, @@ -78,6 +89,7 @@ func resourceNetboxIpamVlan() *schema.Resource { Default: "active", ValidateFunc: validation.StringInSlice([]string{"active", "reserved", "deprecated"}, false), + Description: "The status of this vlan (ipam module) among active, reserved, deprecated (active by default).", }, "tag": { Type: schema.TypeSet, @@ -85,23 +97,28 @@ func resourceNetboxIpamVlan() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "name": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + Description: "Name of the existing tag.", }, "slug": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + Description: "Slug of the existing tag.", }, }, }, + Description: "Existing tag to associate to this vlan (ipam module).", }, "tenant_id": { - Type: schema.TypeInt, - Optional: true, + Type: schema.TypeInt, + Optional: true, + Description: "ID of the tenant where this vlan (ipam module) is attached.", }, "vlan_id": { - Type: schema.TypeInt, - Required: true, + Type: schema.TypeInt, + Required: true, + Description: "The ID of the vlan (vlan tag).", }, }, } diff --git a/netbox/resource_netbox_ipam_vlan_group.go b/netbox/resource_netbox_ipam_vlan_group.go index c13b69d0e..fea7049b3 100644 --- a/netbox/resource_netbox_ipam_vlan_group.go +++ b/netbox/resource_netbox_ipam_vlan_group.go @@ -14,24 +14,27 @@ import ( func resourceNetboxIpamVlanGroup() *schema.Resource { return &schema.Resource{ - Create: resourceNetboxIpamVlanGroupCreate, - Read: resourceNetboxIpamVlanGroupRead, - Update: resourceNetboxIpamVlanGroupUpdate, - Delete: resourceNetboxIpamVlanGroupDelete, - Exists: 
resourceNetboxIpamVlanGroupExists, + Description: "Manage a vlan group (ipam module) within Netbox.", + Create: resourceNetboxIpamVlanGroupCreate, + Read: resourceNetboxIpamVlanGroupRead, + Update: resourceNetboxIpamVlanGroupUpdate, + Delete: resourceNetboxIpamVlanGroupDelete, + Exists: resourceNetboxIpamVlanGroupExists, Importer: &schema.ResourceImporter{ StateContext: schema.ImportStatePassthroughContext, }, Schema: map[string]*schema.Schema{ "content_type": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "The content type of this vlan group (ipam module).", }, "name": { Type: schema.TypeString, Required: true, ValidateFunc: validation.StringLenBetween(1, 50), + Description: "The name for this vlan group (ipam module).", }, "slug": { Type: schema.TypeString, @@ -39,6 +42,7 @@ func resourceNetboxIpamVlanGroup() *schema.Resource { ValidateFunc: validation.StringMatch( regexp.MustCompile("^[-a-zA-Z0-9_]{1,50}$"), "Must be like ^[-a-zA-Z0-9_]{1,50}$"), + Description: "The slug for this vlan group (ipam module).", }, "tag": { Type: schema.TypeSet, @@ -46,15 +50,18 @@ func resourceNetboxIpamVlanGroup() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "name": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + Description: "Name of the existing tag.", }, "slug": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + Description: "Slug of the existing tag.", }, }, }, + Description: "Existing tag to associate to this vlan group (ipam module).", }, }, } diff --git a/netbox/resource_netbox_tenancy_contact.go b/netbox/resource_netbox_tenancy_contact.go index 7ecd9c824..a8b1a37da 100644 --- a/netbox/resource_netbox_tenancy_contact.go +++ b/netbox/resource_netbox_tenancy_contact.go @@ -14,11 +14,12 @@ import ( func resourceNetboxTenancyContact() *schema.Resource { return &schema.Resource{ - Create: 
resourceNetboxTenancyContactCreate, - Read: resourceNetboxTenancyContactRead, - Update: resourceNetboxTenancyContactUpdate, - Delete: resourceNetboxTenancyContactDelete, - Exists: resourceNetboxTenancyContactExists, + Description: "Manage a contact (tenancy module) within Netbox.", + Create: resourceNetboxTenancyContactCreate, + Read: resourceNetboxTenancyContactRead, + Update: resourceNetboxTenancyContactUpdate, + Delete: resourceNetboxTenancyContactDelete, + Exists: resourceNetboxTenancyContactExists, Importer: &schema.ResourceImporter{ StateContext: schema.ImportStatePassthroughContext, }, @@ -29,20 +30,24 @@ func resourceNetboxTenancyContact() *schema.Resource { Optional: true, Default: nil, ValidateFunc: validation.StringLenBetween(1, 200), + Description: "The address for this contact (tenancy module).", }, "comments": { - Type: schema.TypeString, - Optional: true, - Default: nil, + Type: schema.TypeString, + Optional: true, + Default: nil, + Description: "Comments for this contact (tenancy module).", }, "contact_group_id": { - Type: schema.TypeInt, - Optional: true, - Default: 0, + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "ID of the group where this contact (tenancy module) belongs to.", }, "content_type": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "The content type of this contact (tenancy module).", }, "custom_field": { Type: schema.TypeSet, @@ -50,21 +55,25 @@ func resourceNetboxTenancyContact() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "name": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + Description: "Name of the existing custom field.", }, "type": { Type: schema.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{"text", "integer", "boolean", "date", "url", "selection", "multiple"}, false), + Description: "Type of the existing custom field (text, integer, 
boolean, url, selection, multiple).", }, "value": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + Description: "Value of the existing custom field.", }, }, }, + Description: "Existing custom fields to associate to this contact (tenancy module).", }, "email": { Type: schema.TypeString, @@ -79,17 +88,20 @@ func resourceNetboxTenancyContact() *schema.Resource { } return }, + Description: "The e-mail for this contact (tenancy module).", }, "name": { Type: schema.TypeString, Required: true, ValidateFunc: validation.StringLenBetween(1, 100), + Description: "The name for this contact (tenancy module).", }, "phone": { Type: schema.TypeString, Optional: true, Default: nil, ValidateFunc: validation.StringLenBetween(1, 50), + Description: "The phone for this contact (tenancy module).", }, "tag": { Type: schema.TypeSet, @@ -97,21 +109,25 @@ func resourceNetboxTenancyContact() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "name": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + Description: "Name of the existing tag.", }, "slug": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + Description: "Slug of the existing tag.", }, }, }, + Description: "Existing tag to associate to this contact (tenancy module).", }, "title": { Type: schema.TypeString, Optional: true, Default: nil, ValidateFunc: validation.StringLenBetween(1, 100), + Description: "The title for this contact (tenancy module).", }, }, } diff --git a/netbox/resource_netbox_tenancy_contact_assignment.go b/netbox/resource_netbox_tenancy_contact_assignment.go index 0b740e84c..04d878cb2 100644 --- a/netbox/resource_netbox_tenancy_contact_assignment.go +++ b/netbox/resource_netbox_tenancy_contact_assignment.go @@ -13,31 +13,36 @@ import ( func resourceNetboxTenancyContactAssignment() *schema.Resource { return &schema.Resource{ - Create: 
resourceNetboxTenancyContactAssignmentCreate, - Read: resourceNetboxTenancyContactAssignmentRead, - Update: resourceNetboxTenancyContactAssignmentUpdate, - Delete: resourceNetboxTenancyContactAssignmentDelete, - Exists: resourceNetboxTenancyContactAssignmentExists, + Description: "Link a contact (tenancy module) to another resource within Netbox.", + Create: resourceNetboxTenancyContactAssignmentCreate, + Read: resourceNetboxTenancyContactAssignmentRead, + Update: resourceNetboxTenancyContactAssignmentUpdate, + Delete: resourceNetboxTenancyContactAssignmentDelete, + Exists: resourceNetboxTenancyContactAssignmentExists, Importer: &schema.ResourceImporter{ StateContext: schema.ImportStatePassthroughContext, }, Schema: map[string]*schema.Schema{ "contact_id": { - Type: schema.TypeInt, - Required: true, + Type: schema.TypeInt, + Required: true, + Description: "ID of the contact to link to this contact assignment (tenancy module).", }, "contact_role_id": { - Type: schema.TypeInt, - Required: true, + Type: schema.TypeInt, + Required: true, + Description: "The role of the contact for this contact assignment (tenancy module).", }, "content_type": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + Description: "Type of the object where the contact will be linked.", }, "object_id": { - Type: schema.TypeInt, - Required: true, + Type: schema.TypeInt, + Required: true, + Description: "ID of the object where the contact will be linked.", }, "priority": { Type: schema.TypeString, @@ -45,6 +50,7 @@ func resourceNetboxTenancyContactAssignment() *schema.Resource { Default: "primary", ValidateFunc: validation.StringInSlice([]string{"primary", "secondary", "tertiary", "inactive"}, false), + Description: "Priority of this contact among primary, secondary and tertiary (primary by default).", }, }, } diff --git a/netbox/resource_netbox_tenancy_contact_group.go b/netbox/resource_netbox_tenancy_contact_group.go index 8222d457a..17e123c6c 100644 
--- a/netbox/resource_netbox_tenancy_contact_group.go +++ b/netbox/resource_netbox_tenancy_contact_group.go @@ -14,19 +14,21 @@ import ( func resourceNetboxTenancyContactGroup() *schema.Resource { return &schema.Resource{ - Create: resourceNetboxTenancyContactGroupCreate, - Read: resourceNetboxTenancyContactGroupRead, - Update: resourceNetboxTenancyContactGroupUpdate, - Delete: resourceNetboxTenancyContactGroupDelete, - Exists: resourceNetboxTenancyContactGroupExists, + Description: "Manage a contact group (tenancy module) within Netbox.", + Create: resourceNetboxTenancyContactGroupCreate, + Read: resourceNetboxTenancyContactGroupRead, + Update: resourceNetboxTenancyContactGroupUpdate, + Delete: resourceNetboxTenancyContactGroupDelete, + Exists: resourceNetboxTenancyContactGroupExists, Importer: &schema.ResourceImporter{ StateContext: schema.ImportStatePassthroughContext, }, Schema: map[string]*schema.Schema{ "content_type": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "The content type of this contact group (tenancy module).", }, "custom_field": { Type: schema.TypeSet, @@ -34,37 +36,44 @@ func resourceNetboxTenancyContactGroup() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "name": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + Description: "Name of the existing custom field.", }, "type": { Type: schema.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{"text", "integer", "boolean", "date", "url", "selection", "multiple"}, false), + Description: "Type of the existing custom field (text, integer, boolean, url, selection, multiple).", }, "value": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + Description: "Value of the existing custom field.", }, }, }, + Description: "Existing custom fields to associate to this contact group (tenancy module).", }, 
"description": { Type: schema.TypeString, Optional: true, Default: nil, ValidateFunc: validation.StringLenBetween(1, 100), + Description: "Description for this contact group (tenancy module).", }, "name": { Type: schema.TypeString, Required: true, ValidateFunc: validation.StringLenBetween(1, 100), + Description: "The name for this contact group (tenancy module).", }, "parent_id": { - Type: schema.TypeInt, - Optional: true, - Default: 0, + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "ID of the contact group parent of this one.", }, "slug": { Type: schema.TypeString, @@ -72,6 +81,7 @@ func resourceNetboxTenancyContactGroup() *schema.Resource { ValidateFunc: validation.StringMatch( regexp.MustCompile("^[-a-zA-Z0-9_]{1,50}$"), "Must be like ^[-a-zA-Z0-9_]{1,50}$"), + Description: "The slug for this contact group (tenancy module).", }, "tag": { Type: schema.TypeSet, @@ -79,15 +89,18 @@ func resourceNetboxTenancyContactGroup() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "name": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + Description: "Name of the existing tag.", }, "slug": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + Description: "Slug of the existing tag.", }, }, }, + Description: "Existing tag to associate to this contact group (tenancy module).", }, }, } diff --git a/netbox/resource_netbox_tenancy_contact_role.go b/netbox/resource_netbox_tenancy_contact_role.go index 5a3ed948d..8a2efc871 100644 --- a/netbox/resource_netbox_tenancy_contact_role.go +++ b/netbox/resource_netbox_tenancy_contact_role.go @@ -14,19 +14,21 @@ import ( func resourceNetboxTenancyContactRole() *schema.Resource { return &schema.Resource{ - Create: resourceNetboxTenancyContactRoleCreate, - Read: resourceNetboxTenancyContactRoleRead, - Update: resourceNetboxTenancyContactRoleUpdate, - Delete: resourceNetboxTenancyContactRoleDelete, - Exists: 
resourceNetboxTenancyContactRoleExists, + Description: "Manage a contact role (tenancy module) within Netbox.", + Create: resourceNetboxTenancyContactRoleCreate, + Read: resourceNetboxTenancyContactRoleRead, + Update: resourceNetboxTenancyContactRoleUpdate, + Delete: resourceNetboxTenancyContactRoleDelete, + Exists: resourceNetboxTenancyContactRoleExists, Importer: &schema.ResourceImporter{ StateContext: schema.ImportStatePassthroughContext, }, Schema: map[string]*schema.Schema{ "content_type": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "The content type of this contact role (tenancy module).", }, "custom_field": { Type: schema.TypeSet, @@ -34,32 +36,38 @@ func resourceNetboxTenancyContactRole() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "name": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + Description: "Name of the existing custom field.", }, "type": { Type: schema.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{"text", "integer", "boolean", "date", "url", "selection", "multiple"}, false), + Description: "Type of the existing custom field (text, integer, boolean, url, selection, multiple).", }, "value": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + Description: "Value of the existing custom field.", }, }, }, + Description: "Existing custom fields to associate to this contact role (tenancy module).", }, "description": { Type: schema.TypeString, Optional: true, Default: nil, ValidateFunc: validation.StringLenBetween(1, 100), + Description: "Description for this contact role (tenancy module).", }, "name": { Type: schema.TypeString, Required: true, ValidateFunc: validation.StringLenBetween(1, 100), + Description: "Name of this contact role (tenancy module).", }, "slug": { Type: schema.TypeString, @@ -67,6 +75,7 @@ func 
resourceNetboxTenancyContactRole() *schema.Resource { ValidateFunc: validation.StringMatch( regexp.MustCompile("^[-a-zA-Z0-9_]{1,50}$"), "Must be like ^[-a-zA-Z0-9_]{1,50}$"), + Description: "Slug of this contact role (tenancy module).", }, "tag": { Type: schema.TypeSet, @@ -74,15 +83,18 @@ func resourceNetboxTenancyContactRole() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "name": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + Description: "Name of the existing tag.", }, "slug": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + Description: "Slug of the existing tag.", }, }, }, + Description: "Existing tag to associate to this contact role (tenancy module).", }, }, } diff --git a/netbox/resource_netbox_tenancy_tenant.go b/netbox/resource_netbox_tenancy_tenant.go index 0e0cf8f01..d0bb7bb59 100644 --- a/netbox/resource_netbox_tenancy_tenant.go +++ b/netbox/resource_netbox_tenancy_tenant.go @@ -14,24 +14,27 @@ import ( func resourceNetboxTenancyTenant() *schema.Resource { return &schema.Resource{ - Create: resourceNetboxTenancyTenantCreate, - Read: resourceNetboxTenancyTenantRead, - Update: resourceNetboxTenancyTenantUpdate, - Delete: resourceNetboxTenancyTenantDelete, - Exists: resourceNetboxTenancyTenantExists, + Description: "Manage a tenant (tenancy module) within Netbox.", + Create: resourceNetboxTenancyTenantCreate, + Read: resourceNetboxTenancyTenantRead, + Update: resourceNetboxTenancyTenantUpdate, + Delete: resourceNetboxTenancyTenantDelete, + Exists: resourceNetboxTenancyTenantExists, Importer: &schema.ResourceImporter{ StateContext: schema.ImportStatePassthroughContext, }, Schema: map[string]*schema.Schema{ "comments": { - Type: schema.TypeString, - Optional: true, - Default: nil, + Type: schema.TypeString, + Optional: true, + Default: nil, + Description: "Comments for this tenant (tenancy module).", }, "content_type": { - Type: 
schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "The content type of this tenant (tenancy module).", }, "custom_field": { Type: schema.TypeSet, @@ -39,37 +42,44 @@ func resourceNetboxTenancyTenant() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "name": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + Description: "Name of the existing custom field.", }, "type": { Type: schema.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{"text", "integer", "boolean", "date", "url", "selection", "multiple"}, false), + Description: "Type of the existing custom field (text, integer, boolean, url, selection, multiple).", }, "value": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + Description: "Value of the existing custom field.", }, }, }, + Description: "Existing custom fields to associate to this tenant (tenancy module).", }, "description": { Type: schema.TypeString, Optional: true, Default: nil, ValidateFunc: validation.StringLenBetween(1, 100), + Description: "The description for this tenant (tenancy module).", }, "tenant_group_id": { - Type: schema.TypeInt, - Optional: true, - Default: 0, + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "ID of the group where this tenant (tenancy module) is attached to.", }, "name": { Type: schema.TypeString, Required: true, ValidateFunc: validation.StringLenBetween(1, 30), + Description: "The name for this tenant (tenancy module).", }, "slug": { Type: schema.TypeString, @@ -77,6 +87,7 @@ func resourceNetboxTenancyTenant() *schema.Resource { ValidateFunc: validation.StringMatch( regexp.MustCompile("^[-a-zA-Z0-9_]{1,50}$"), "Must be like ^[-a-zA-Z0-9_]{1,50}$"), + Description: "The slug for this tenant (tenancy module).", }, "tag": { Type: schema.TypeSet, @@ -84,15 +95,18 @@ func resourceNetboxTenancyTenant() *schema.Resource { 
Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "name": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + Description: "Name of the existing tag.", }, "slug": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + Description: "Slug of the existing tag.", }, }, }, + Description: "Existing tag to associate to this tenant (tenancy module).", }, }, } diff --git a/netbox/resource_netbox_tenancy_tenant_group.go b/netbox/resource_netbox_tenancy_tenant_group.go index 1e73dde90..131dc3ee7 100644 --- a/netbox/resource_netbox_tenancy_tenant_group.go +++ b/netbox/resource_netbox_tenancy_tenant_group.go @@ -14,24 +14,27 @@ import ( func resourceNetboxTenancyTenantGroup() *schema.Resource { return &schema.Resource{ - Create: resourceNetboxTenancyTenantGroupCreate, - Read: resourceNetboxTenancyTenantGroupRead, - Update: resourceNetboxTenancyTenantGroupUpdate, - Delete: resourceNetboxTenancyTenantGroupDelete, - Exists: resourceNetboxTenancyTenantGroupExists, + Description: "Manage a tenant group (tenancy module) within Netbox.", + Create: resourceNetboxTenancyTenantGroupCreate, + Read: resourceNetboxTenancyTenantGroupRead, + Update: resourceNetboxTenancyTenantGroupUpdate, + Delete: resourceNetboxTenancyTenantGroupDelete, + Exists: resourceNetboxTenancyTenantGroupExists, Importer: &schema.ResourceImporter{ StateContext: schema.ImportStatePassthroughContext, }, Schema: map[string]*schema.Schema{ "content_type": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "The content type of this tenant group (tenancy module).", }, "name": { Type: schema.TypeString, Required: true, ValidateFunc: validation.StringLenBetween(1, 50), + Description: "The name for this tenant group (tenancy module).", }, "slug": { Type: schema.TypeString, @@ -39,6 +42,7 @@ func resourceNetboxTenancyTenantGroup() *schema.Resource { ValidateFunc: 
validation.StringMatch( regexp.MustCompile("^[-a-zA-Z0-9_]{1,50}$"), "Must be like ^[-a-zA-Z0-9_]{1,50}$"), + Description: "The slug for this tenant group (tenancy module).", }, "tag": { Type: schema.TypeSet, @@ -46,15 +50,18 @@ func resourceNetboxTenancyTenantGroup() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "name": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + Description: "Name of the existing tag.", }, "slug": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + Description: "Slug of the existing tag.", }, }, }, + Description: "Existing tag to associate to this tenant group (tenancy module).", }, }, } diff --git a/netbox/resource_netbox_virtualization_interface.go b/netbox/resource_netbox_virtualization_interface.go index 7848c4b3e..d6e56ed64 100644 --- a/netbox/resource_netbox_virtualization_interface.go +++ b/netbox/resource_netbox_virtualization_interface.go @@ -14,19 +14,21 @@ import ( func resourceNetboxVirtualizationInterface() *schema.Resource { return &schema.Resource{ - Create: resourceNetboxVirtualizationInterfaceCreate, - Read: resourceNetboxVirtualizationInterfaceRead, - Update: resourceNetboxVirtualizationInterfaceUpdate, - Delete: resourceNetboxVirtualizationInterfaceDelete, - Exists: resourceNetboxVirtualizationInterfaceExists, + Description: "Manage an interface (virtualization module) resource within Netbox.", + Create: resourceNetboxVirtualizationInterfaceCreate, + Read: resourceNetboxVirtualizationInterfaceRead, + Update: resourceNetboxVirtualizationInterfaceUpdate, + Delete: resourceNetboxVirtualizationInterfaceDelete, + Exists: resourceNetboxVirtualizationInterfaceExists, Importer: &schema.ResourceImporter{ StateContext: schema.ImportStatePassthroughContext, }, Schema: map[string]*schema.Schema{ "content_type": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: 
"The content type of this interface (virtualization module).", }, "custom_field": { Type: schema.TypeSet, @@ -34,32 +36,38 @@ func resourceNetboxVirtualizationInterface() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "name": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + Description: "Name of the existing custom field.", }, "type": { Type: schema.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{"text", "integer", "boolean", "date", "url", "selection", "multiple"}, false), + Description: "Type of the existing custom field (text, integer, boolean, url, selection, multiple).", }, "value": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + Description: "Value of the existing custom field.", }, }, }, + Description: "Existing custom fields to associate to this interface (virtualization module).", }, "description": { Type: schema.TypeString, Optional: true, Default: nil, ValidateFunc: validation.StringLenBetween(1, 200), + Description: "Description for this interface (virtualization module).", }, "enabled": { - Type: schema.TypeBool, - Optional: true, - Default: true, + Type: schema.TypeBool, + Optional: true, + Default: true, + Description: "true or false (true by default)", }, "mac_address": { Type: schema.TypeString, @@ -67,32 +75,37 @@ func resourceNetboxVirtualizationInterface() *schema.Resource { ValidateFunc: validation.StringMatch( regexp.MustCompile("^([A-Z0-9]{2}:){5}[A-Z0-9]{2}$"), "Must be like AA:AA:AA:AA:AA"), - ForceNew: true, + ForceNew: true, + Description: "Mac address for this interface (virtualization module)", }, "mode": { Type: schema.TypeString, Optional: true, ValidateFunc: validation.StringInSlice([]string{"access", "tagged", "tagged-all"}, false), - ForceNew: true, + ForceNew: true, + Description: "The mode among access, tagged, tagged-all.", }, "mtu": { Type: schema.TypeInt, Optional: true, 
ValidateFunc: validation.IntBetween(1, 65536), ForceNew: true, + Description: "The MTU between 1 and 65536 for this interface (virtualization module).", }, "name": { Type: schema.TypeString, Required: true, ValidateFunc: validation.StringLenBetween(1, 64), + Description: "Description for this interface (virtualization module)", }, "tagged_vlans": { Type: schema.TypeSet, Elem: &schema.Schema{ Type: schema.TypeInt, }, - Optional: true, + Optional: true, + Description: "List of vlan id tagged for this interface (virtualization module)", }, "tag": { Type: schema.TypeSet, @@ -100,27 +113,33 @@ func resourceNetboxVirtualizationInterface() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "name": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + Description: "Name of the existing tag.", }, "slug": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + Description: "Slug of the existing tag.", }, }, }, + Description: "Existing tag to associate to this interface (virtualization module).", }, "type": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "Type of interface among virtualization.vminterface for VM or dcim.interface for device", }, "untagged_vlan": { - Type: schema.TypeInt, - Optional: true, + Type: schema.TypeInt, + Optional: true, + Description: "Vlan ID untagged for this interface (virtualization module).", }, "virtualmachine_id": { - Type: schema.TypeInt, - Required: true, + Type: schema.TypeInt, + Required: true, + Description: "ID of the VM where this interface (virtualization module) is attached to.", }, }, } diff --git a/netbox/resource_netbox_virtualization_vm.go b/netbox/resource_netbox_virtualization_vm.go index 876ffb197..da7a59c14 100644 --- a/netbox/resource_netbox_virtualization_vm.go +++ b/netbox/resource_netbox_virtualization_vm.go @@ -16,28 +16,32 @@ import ( func 
resourceNetboxVirtualizationVM() *schema.Resource { return &schema.Resource{ - Create: resourceNetboxVirtualizationVMCreate, - Read: resourceNetboxVirtualizationVMRead, - Update: resourceNetboxVirtualizationVMUpdate, - Delete: resourceNetboxVirtualizationVMDelete, - Exists: resourceNetboxVirtualizationVMExists, + Description: "Manage a VM (virtualization module) resource within Netbox.", + Create: resourceNetboxVirtualizationVMCreate, + Read: resourceNetboxVirtualizationVMRead, + Update: resourceNetboxVirtualizationVMUpdate, + Delete: resourceNetboxVirtualizationVMDelete, + Exists: resourceNetboxVirtualizationVMExists, Importer: &schema.ResourceImporter{ StateContext: schema.ImportStatePassthroughContext, }, Schema: map[string]*schema.Schema{ "cluster_id": { - Type: schema.TypeInt, - Required: true, + Type: schema.TypeInt, + Required: true, + Description: "ID of the cluster which host this VM (virtualization module).", }, "comments": { - Type: schema.TypeString, - Optional: true, - Default: nil, + Type: schema.TypeString, + Optional: true, + Default: nil, + Description: "Comments for this VM (virtualization module).", }, "content_type": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "The content type of this VM (virtualization module).", }, "custom_field": { Type: schema.TypeSet, @@ -45,50 +49,60 @@ func resourceNetboxVirtualizationVM() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "name": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + Description: "Name of the existing custom field.", }, "type": { Type: schema.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{"text", "integer", "boolean", "date", "url", "selection", "multiple"}, false), + Description: "Type of the existing custom field (text, integer, boolean, url, selection, multiple).", }, "value": { - Type: schema.TypeString, - Required: true, + 
Type: schema.TypeString, + Required: true, + Description: "Value of the existing custom field.", }, }, }, + Description: "Existing custom fields to associate to this VM (virtualization module).", }, "disk": { - Type: schema.TypeInt, - Optional: true, - Default: 0, + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The size in GB of the disk for this VM (virtualization module).", }, "local_context_data": { - Type: schema.TypeString, - Optional: true, + Type: schema.TypeString, + Optional: true, + Description: "Local context data for this VM (virtualization module).", }, "memory": { - Type: schema.TypeInt, - Optional: true, - Default: 0, + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "The size in MB of the memory of this VM (virtualization module).", }, "name": { Type: schema.TypeString, Required: true, ValidateFunc: validation.StringLenBetween(1, 64), + Description: "The name for this VM (virtualization module).", }, "platform_id": { - Type: schema.TypeInt, - Optional: true, - Default: 0, + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "ID of the platform for this VM (virtualization module).", }, "role_id": { - Type: schema.TypeInt, - Optional: true, - Default: 0, + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "ID of the role for this VM (virtualization module).", }, "status": { Type: schema.TypeString, @@ -96,6 +110,7 @@ func resourceNetboxVirtualizationVM() *schema.Resource { Default: "active", ValidateFunc: validation.StringInSlice([]string{"offline", "active", "planned", "staged", "failed", "decommissioning"}, false), + Description: "The status among offline, active, planned, staged, failed or decommissioning (active by default).", }, "tag": { Type: schema.TypeSet, @@ -103,19 +118,23 @@ func resourceNetboxVirtualizationVM() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "name": { - Type: schema.TypeString, - Required: true, + Type: 
schema.TypeString, + Required: true, + Description: "Name of the existing tag.", }, "slug": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + Description: "Slug of the existing tag.", }, }, }, + Description: "Existing tag to associate to this VM (virtualization module).", }, "tenant_id": { - Type: schema.TypeInt, - Optional: true, + Type: schema.TypeInt, + Optional: true, + Description: "ID of the tenant where this VM (virtualization module) is attached.", }, "vcpus": { Type: schema.TypeString, @@ -129,6 +148,7 @@ func resourceNetboxVirtualizationVM() *schema.Resource { } return false }, + Description: "The number of VCPUS for this VM (virtualization module).", }, }, } diff --git a/templates/index.md.tmpl b/templates/index.md.tmpl new file mode 100644 index 000000000..21e20b4f0 --- /dev/null +++ b/templates/index.md.tmpl @@ -0,0 +1,26 @@ +--- +layout: "" +page_title: "Provider: Netbox" +description: |- + The Netbox provider provides resources to interact with Netbox application. +--- + +# terraform-provider-netbox Provider + +The Netbox provider provides resources to interact with [Netbox](https://netbox.readthedocs.io/en/stable/) application. 
+ +## Compatibility with Netbox + +| Netbox version | Provider version | +|:--------------:|:----------------:| +| 2.8 | 0.x.y | +| 2.9 | 1.x.y | +| 2.11 | 2.x.y | +| 3.0 | 3.x.y | +| 3.1 | 4.x.y | + +## Example Usage + +{{tffile "examples/provider/provider.tf"}} + +{{ .SchemaMarkdown | trimspace }} diff --git a/tools/tools.go b/tools/tools.go new file mode 100644 index 000000000..f03ce3ddd --- /dev/null +++ b/tools/tools.go @@ -0,0 +1,8 @@ +// +build tools + +package tools + +import ( + // document generation + _ "github.com/hashicorp/terraform-plugin-docs/cmd/tfplugindocs" +) diff --git a/utils/generateJsonDatasources b/utils/generateJsonDatasources index 66b7d96f3..929110b68 100755 --- a/utils/generateJsonDatasources +++ b/utils/generateJsonDatasources @@ -70,7 +70,7 @@ cat ${FILE}.tmp | sort -u > ${FILE} rm ${FILE}.tmp find ${SCRIPT_PATH}/../netbox -name "*_json_*.go" -delete -find ${SCRIPT_PATH}/../docs/data-sources -name "json_*" -delete +rm -rf ${SCRIPT_PATH}/../examples/data-sources/netbox_json_* rm -f ${SCRIPT_PATH}/provider_update.txt while read -r line; do @@ -93,6 +93,7 @@ import ( func dataNetboxJSON${SECTION}${ITEM}List() *schema.Resource { return &schema.Resource{ + Description: "Get json output from the ${ENDPOINT}_list Netbox endpoint.", Read: dataNetboxJSON${SECTION}${ITEM}ListRead, Schema: map[string]*schema.Schema{ @@ -100,10 +101,12 @@ func dataNetboxJSON${SECTION}${ITEM}List() *schema.Resource { Type: schema.TypeInt, Optional: true, Default: 0, + Description: "The max number of returned results. 
If 0 is specified, all records will be returned.", }, "json": { Type: schema.TypeString, Computed: true, + Description: "JSON output of the list of objects for this Netbox endpoint.", }, }, } @@ -134,31 +137,16 @@ cat << EOF >> ${SCRIPT_PATH}/provider_update.txt "netbox_json_${ENDPOINT}_list": dataNetboxJSON${SECTION}${ITEM}List(), EOF -cat << EOF > ${SCRIPT_PATH}/../docs/data-sources/json_${ENDPOINT}_list.md -# netbox\_json\_`echo ${ENDPOINT} | sed 's/_/\\\_/g'`\_list Data Source +mkdir -p ${SCRIPT_PATH}/../examples/data-sources/netbox_json_${ENDPOINT}_list -Get json output from the ${ENDPOINT}_list Netbox endpoint - -## Example Usage - -\`\`\`hcl +cat << EOF > ${SCRIPT_PATH}/../examples/data-sources/netbox_json_${ENDPOINT}_list/data-source.tf data "netbox_json_${ENDPOINT}_list" "test" { limit = 0 } + output "example" { value = jsondecode(data.netbox_json_${ENDPOINT}_list.test.json) } -\`\`\` - -## Argument Reference - -* \`\`limit\`\` (Optional). The max number of returned results. If 0 is specified, all records will be returned. - -## Attributes Reference - -In addition to the above arguments, the following attributes are exported: -* \`\`json\`\` - JSON output of the list of objects for this Netbox endpoint. 
- EOF done < "$FILE" diff --git a/vendor/github.com/Masterminds/goutils/.travis.yml b/vendor/github.com/Masterminds/goutils/.travis.yml new file mode 100644 index 000000000..4025e01ec --- /dev/null +++ b/vendor/github.com/Masterminds/goutils/.travis.yml @@ -0,0 +1,18 @@ +language: go + +go: + - 1.6 + - 1.7 + - 1.8 + - tip + +script: + - go test -v + +notifications: + webhooks: + urls: + - https://webhooks.gitter.im/e/06e3328629952dabe3e0 + on_success: change # options: [always|never|change] default: always + on_failure: always # options: [always|never|change] default: always + on_start: never # options: [always|never|change] default: always diff --git a/vendor/github.com/Masterminds/goutils/CHANGELOG.md b/vendor/github.com/Masterminds/goutils/CHANGELOG.md new file mode 100644 index 000000000..d700ec47f --- /dev/null +++ b/vendor/github.com/Masterminds/goutils/CHANGELOG.md @@ -0,0 +1,8 @@ +# 1.0.1 (2017-05-31) + +## Fixed +- #21: Fix generation of alphanumeric strings (thanks @dbarranco) + +# 1.0.0 (2014-04-30) + +- Initial release. diff --git a/vendor/github.com/Masterminds/goutils/LICENSE.txt b/vendor/github.com/Masterminds/goutils/LICENSE.txt new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/Masterminds/goutils/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/Masterminds/goutils/README.md b/vendor/github.com/Masterminds/goutils/README.md new file mode 100644 index 000000000..163ffe72a --- /dev/null +++ b/vendor/github.com/Masterminds/goutils/README.md @@ -0,0 +1,70 @@ +GoUtils +=========== +[![Stability: Maintenance](https://masterminds.github.io/stability/maintenance.svg)](https://masterminds.github.io/stability/maintenance.html) +[![GoDoc](https://godoc.org/github.com/Masterminds/goutils?status.png)](https://godoc.org/github.com/Masterminds/goutils) [![Build Status](https://travis-ci.org/Masterminds/goutils.svg?branch=master)](https://travis-ci.org/Masterminds/goutils) [![Build status](https://ci.appveyor.com/api/projects/status/sc2b1ew0m7f0aiju?svg=true)](https://ci.appveyor.com/project/mattfarina/goutils) + + +GoUtils provides users with utility functions to manipulate strings in various ways. It is a Go implementation of some +string manipulation libraries of Java Apache Commons. 
GoUtils includes the following Java Apache Commons classes: +* WordUtils +* RandomStringUtils +* StringUtils (partial implementation) + +## Installation +If you have Go set up on your system, from the GOPATH directory within the command line/terminal, enter this: + + go get github.com/Masterminds/goutils + +If you do not have Go set up on your system, please follow the [Go installation directions from the documenation](http://golang.org/doc/install), and then follow the instructions above to install GoUtils. + + +## Documentation +GoUtils doc is available here: [![GoDoc](https://godoc.org/github.com/Masterminds/goutils?status.png)](https://godoc.org/github.com/Masterminds/goutils) + + +## Usage +The code snippets below show examples of how to use GoUtils. Some functions return errors while others do not. The first instance below, which does not return an error, is the `Initials` function (located within the `wordutils.go` file). + + package main + + import ( + "fmt" + "github.com/Masterminds/goutils" + ) + + func main() { + + // EXAMPLE 1: A goutils function which returns no errors + fmt.Println (goutils.Initials("John Doe Foo")) // Prints out "JDF" + + } +Some functions return errors mainly due to illegal arguements used as parameters. The code example below illustrates how to deal with function that returns an error. In this instance, the function is the `Random` function (located within the `randomstringutils.go` file). + + package main + + import ( + "fmt" + "github.com/Masterminds/goutils" + ) + + func main() { + + // EXAMPLE 2: A goutils function which returns an error + rand1, err1 := goutils.Random (-1, 0, 0, true, true) + + if err1 != nil { + fmt.Println(err1) // Prints out error message because -1 was entered as the first parameter in goutils.Random(...) + } else { + fmt.Println(rand1) + } + + } + +## License +GoUtils is licensed under the Apache License, Version 2.0. 
Please check the LICENSE.txt file or visit http://www.apache.org/licenses/LICENSE-2.0 for a copy of the license. + +## Issue Reporting +Make suggestions or report issues using the Git issue tracker: https://github.com/Masterminds/goutils/issues + +## Website +* [GoUtils webpage](http://Masterminds.github.io/goutils/) diff --git a/vendor/github.com/Masterminds/goutils/appveyor.yml b/vendor/github.com/Masterminds/goutils/appveyor.yml new file mode 100644 index 000000000..657564a84 --- /dev/null +++ b/vendor/github.com/Masterminds/goutils/appveyor.yml @@ -0,0 +1,21 @@ +version: build-{build}.{branch} + +clone_folder: C:\gopath\src\github.com\Masterminds\goutils +shallow_clone: true + +environment: + GOPATH: C:\gopath + +platform: + - x64 + +build: off + +install: + - go version + - go env + +test_script: + - go test -v + +deploy: off diff --git a/vendor/github.com/Masterminds/goutils/cryptorandomstringutils.go b/vendor/github.com/Masterminds/goutils/cryptorandomstringutils.go new file mode 100644 index 000000000..8dbd92485 --- /dev/null +++ b/vendor/github.com/Masterminds/goutils/cryptorandomstringutils.go @@ -0,0 +1,230 @@ +/* +Copyright 2014 Alexander Okoli + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package goutils + +import ( + "crypto/rand" + "fmt" + "math" + "math/big" + "unicode" +) + +/* +CryptoRandomNonAlphaNumeric creates a random string whose length is the number of characters specified. 
+Characters will be chosen from the set of all characters (ASCII/Unicode values between 0 to 2,147,483,647 (math.MaxInt32)). + +Parameter: + count - the length of random string to create + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) +*/ +func CryptoRandomNonAlphaNumeric(count int) (string, error) { + return CryptoRandomAlphaNumericCustom(count, false, false) +} + +/* +CryptoRandomAscii creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of characters whose ASCII value is between 32 and 126 (inclusive). + +Parameter: + count - the length of random string to create + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) +*/ +func CryptoRandomAscii(count int) (string, error) { + return CryptoRandom(count, 32, 127, false, false) +} + +/* +CryptoRandomNumeric creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of numeric characters. + +Parameter: + count - the length of random string to create + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) +*/ +func CryptoRandomNumeric(count int) (string, error) { + return CryptoRandom(count, 0, 0, false, true) +} + +/* +CryptoRandomAlphabetic creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of alpha-numeric characters as indicated by the arguments. 
+ +Parameters: + count - the length of random string to create + letters - if true, generated string may include alphabetic characters + numbers - if true, generated string may include numeric characters + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) +*/ +func CryptoRandomAlphabetic(count int) (string, error) { + return CryptoRandom(count, 0, 0, true, false) +} + +/* +CryptoRandomAlphaNumeric creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of alpha-numeric characters. + +Parameter: + count - the length of random string to create + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) +*/ +func CryptoRandomAlphaNumeric(count int) (string, error) { + return CryptoRandom(count, 0, 0, true, true) +} + +/* +CryptoRandomAlphaNumericCustom creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of alpha-numeric characters as indicated by the arguments. + +Parameters: + count - the length of random string to create + letters - if true, generated string may include alphabetic characters + numbers - if true, generated string may include numeric characters + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) +*/ +func CryptoRandomAlphaNumericCustom(count int, letters bool, numbers bool) (string, error) { + return CryptoRandom(count, 0, 0, letters, numbers) +} + +/* +CryptoRandom creates a random string based on a variety of options, using using golang's crypto/rand source of randomness. 
+If the parameters start and end are both 0, start and end are set to ' ' and 'z', the ASCII printable characters, will be used, +unless letters and numbers are both false, in which case, start and end are set to 0 and math.MaxInt32, respectively. +If chars is not nil, characters stored in chars that are between start and end are chosen. + +Parameters: + count - the length of random string to create + start - the position in set of chars (ASCII/Unicode int) to start at + end - the position in set of chars (ASCII/Unicode int) to end before + letters - if true, generated string may include alphabetic characters + numbers - if true, generated string may include numeric characters + chars - the set of chars to choose randoms from. If nil, then it will use the set of all chars. + +Returns: + string - the random string + error - an error stemming from invalid parameters: if count < 0; or the provided chars array is empty; or end <= start; or end > len(chars) +*/ +func CryptoRandom(count int, start int, end int, letters bool, numbers bool, chars ...rune) (string, error) { + if count == 0 { + return "", nil + } else if count < 0 { + err := fmt.Errorf("randomstringutils illegal argument: Requested random string length %v is less than 0.", count) // equiv to err := errors.New("...") + return "", err + } + if chars != nil && len(chars) == 0 { + err := fmt.Errorf("randomstringutils illegal argument: The chars array must not be empty") + return "", err + } + + if start == 0 && end == 0 { + if chars != nil { + end = len(chars) + } else { + if !letters && !numbers { + end = math.MaxInt32 + } else { + end = 'z' + 1 + start = ' ' + } + } + } else { + if end <= start { + err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) must be greater than start (%v)", end, start) + return "", err + } + + if chars != nil && end > len(chars) { + err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) cannot be greater than len(chars) (%v)", end, len(chars)) 
+ return "", err + } + } + + buffer := make([]rune, count) + gap := end - start + + // high-surrogates range, (\uD800-\uDBFF) = 55296 - 56319 + // low-surrogates range, (\uDC00-\uDFFF) = 56320 - 57343 + + for count != 0 { + count-- + var ch rune + if chars == nil { + ch = rune(getCryptoRandomInt(gap) + int64(start)) + } else { + ch = chars[getCryptoRandomInt(gap)+int64(start)] + } + + if letters && unicode.IsLetter(ch) || numbers && unicode.IsDigit(ch) || !letters && !numbers { + if ch >= 56320 && ch <= 57343 { // low surrogate range + if count == 0 { + count++ + } else { + // Insert low surrogate + buffer[count] = ch + count-- + // Insert high surrogate + buffer[count] = rune(55296 + getCryptoRandomInt(128)) + } + } else if ch >= 55296 && ch <= 56191 { // High surrogates range (Partial) + if count == 0 { + count++ + } else { + // Insert low surrogate + buffer[count] = rune(56320 + getCryptoRandomInt(128)) + count-- + // Insert high surrogate + buffer[count] = ch + } + } else if ch >= 56192 && ch <= 56319 { + // private high surrogate, skip it + count++ + } else { + // not one of the surrogates* + buffer[count] = ch + } + } else { + count++ + } + } + return string(buffer), nil +} + +func getCryptoRandomInt(count int) int64 { + nBig, err := rand.Int(rand.Reader, big.NewInt(int64(count))) + if err != nil { + panic(err) + } + return nBig.Int64() +} diff --git a/vendor/github.com/Masterminds/goutils/randomstringutils.go b/vendor/github.com/Masterminds/goutils/randomstringutils.go new file mode 100644 index 000000000..272670231 --- /dev/null +++ b/vendor/github.com/Masterminds/goutils/randomstringutils.go @@ -0,0 +1,248 @@ +/* +Copyright 2014 Alexander Okoli + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package goutils + +import ( + "fmt" + "math" + "math/rand" + "time" + "unicode" +) + +// RANDOM provides the time-based seed used to generate random numbers +var RANDOM = rand.New(rand.NewSource(time.Now().UnixNano())) + +/* +RandomNonAlphaNumeric creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of all characters (ASCII/Unicode values between 0 to 2,147,483,647 (math.MaxInt32)). + +Parameter: + count - the length of random string to create + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) +*/ +func RandomNonAlphaNumeric(count int) (string, error) { + return RandomAlphaNumericCustom(count, false, false) +} + +/* +RandomAscii creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of characters whose ASCII value is between 32 and 126 (inclusive). + +Parameter: + count - the length of random string to create + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) +*/ +func RandomAscii(count int) (string, error) { + return Random(count, 32, 127, false, false) +} + +/* +RandomNumeric creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of numeric characters. 
+ +Parameter: + count - the length of random string to create + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) +*/ +func RandomNumeric(count int) (string, error) { + return Random(count, 0, 0, false, true) +} + +/* +RandomAlphabetic creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of alphabetic characters. + +Parameters: + count - the length of random string to create + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) +*/ +func RandomAlphabetic(count int) (string, error) { + return Random(count, 0, 0, true, false) +} + +/* +RandomAlphaNumeric creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of alpha-numeric characters. + +Parameter: + count - the length of random string to create + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) +*/ +func RandomAlphaNumeric(count int) (string, error) { + return Random(count, 0, 0, true, true) +} + +/* +RandomAlphaNumericCustom creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of alpha-numeric characters as indicated by the arguments. + +Parameters: + count - the length of random string to create + letters - if true, generated string may include alphabetic characters + numbers - if true, generated string may include numeric characters + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) 
+*/ +func RandomAlphaNumericCustom(count int, letters bool, numbers bool) (string, error) { + return Random(count, 0, 0, letters, numbers) +} + +/* +Random creates a random string based on a variety of options, using default source of randomness. +This method has exactly the same semantics as RandomSeed(int, int, int, bool, bool, []char, *rand.Rand), but +instead of using an externally supplied source of randomness, it uses the internal *rand.Rand instance. + +Parameters: + count - the length of random string to create + start - the position in set of chars (ASCII/Unicode int) to start at + end - the position in set of chars (ASCII/Unicode int) to end before + letters - if true, generated string may include alphabetic characters + numbers - if true, generated string may include numeric characters + chars - the set of chars to choose randoms from. If nil, then it will use the set of all chars. + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) +*/ +func Random(count int, start int, end int, letters bool, numbers bool, chars ...rune) (string, error) { + return RandomSeed(count, start, end, letters, numbers, chars, RANDOM) +} + +/* +RandomSeed creates a random string based on a variety of options, using supplied source of randomness. +If the parameters start and end are both 0, start and end are set to ' ' and 'z', the ASCII printable characters, will be used, +unless letters and numbers are both false, in which case, start and end are set to 0 and math.MaxInt32, respectively. +If chars is not nil, characters stored in chars that are between start and end are chosen. +This method accepts a user-supplied *rand.Rand instance to use as a source of randomness. By seeding a single *rand.Rand instance +with a fixed seed and using it for each call, the same random sequence of strings can be generated repeatedly and predictably. 
+ +Parameters: + count - the length of random string to create + start - the position in set of chars (ASCII/Unicode decimals) to start at + end - the position in set of chars (ASCII/Unicode decimals) to end before + letters - if true, generated string may include alphabetic characters + numbers - if true, generated string may include numeric characters + chars - the set of chars to choose randoms from. If nil, then it will use the set of all chars. + random - a source of randomness. + +Returns: + string - the random string + error - an error stemming from invalid parameters: if count < 0; or the provided chars array is empty; or end <= start; or end > len(chars) +*/ +func RandomSeed(count int, start int, end int, letters bool, numbers bool, chars []rune, random *rand.Rand) (string, error) { + + if count == 0 { + return "", nil + } else if count < 0 { + err := fmt.Errorf("randomstringutils illegal argument: Requested random string length %v is less than 0.", count) // equiv to err := errors.New("...") + return "", err + } + if chars != nil && len(chars) == 0 { + err := fmt.Errorf("randomstringutils illegal argument: The chars array must not be empty") + return "", err + } + + if start == 0 && end == 0 { + if chars != nil { + end = len(chars) + } else { + if !letters && !numbers { + end = math.MaxInt32 + } else { + end = 'z' + 1 + start = ' ' + } + } + } else { + if end <= start { + err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) must be greater than start (%v)", end, start) + return "", err + } + + if chars != nil && end > len(chars) { + err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) cannot be greater than len(chars) (%v)", end, len(chars)) + return "", err + } + } + + buffer := make([]rune, count) + gap := end - start + + // high-surrogates range, (\uD800-\uDBFF) = 55296 - 56319 + // low-surrogates range, (\uDC00-\uDFFF) = 56320 - 57343 + + for count != 0 { + count-- + var ch rune + if chars == nil { + ch = 
rune(random.Intn(gap) + start) + } else { + ch = chars[random.Intn(gap)+start] + } + + if letters && unicode.IsLetter(ch) || numbers && unicode.IsDigit(ch) || !letters && !numbers { + if ch >= 56320 && ch <= 57343 { // low surrogate range + if count == 0 { + count++ + } else { + // Insert low surrogate + buffer[count] = ch + count-- + // Insert high surrogate + buffer[count] = rune(55296 + random.Intn(128)) + } + } else if ch >= 55296 && ch <= 56191 { // High surrogates range (Partial) + if count == 0 { + count++ + } else { + // Insert low surrogate + buffer[count] = rune(56320 + random.Intn(128)) + count-- + // Insert high surrogate + buffer[count] = ch + } + } else if ch >= 56192 && ch <= 56319 { + // private high surrogate, skip it + count++ + } else { + // not one of the surrogates* + buffer[count] = ch + } + } else { + count++ + } + } + return string(buffer), nil +} diff --git a/vendor/github.com/Masterminds/goutils/stringutils.go b/vendor/github.com/Masterminds/goutils/stringutils.go new file mode 100644 index 000000000..741bb530e --- /dev/null +++ b/vendor/github.com/Masterminds/goutils/stringutils.go @@ -0,0 +1,240 @@ +/* +Copyright 2014 Alexander Okoli + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package goutils + +import ( + "bytes" + "fmt" + "strings" + "unicode" +) + +// Typically returned by functions where a searched item cannot be found +const INDEX_NOT_FOUND = -1 + +/* +Abbreviate abbreviates a string using ellipses. 
This will turn the string "Now is the time for all good men" into "Now is the time for..." + +Specifically, the algorithm is as follows: + + - If str is less than maxWidth characters long, return it. + - Else abbreviate it to (str[0:maxWidth - 3] + "..."). + - If maxWidth is less than 4, return an illegal argument error. + - In no case will it return a string of length greater than maxWidth. + +Parameters: + str - the string to check + maxWidth - maximum length of result string, must be at least 4 + +Returns: + string - abbreviated string + error - if the width is too small +*/ +func Abbreviate(str string, maxWidth int) (string, error) { + return AbbreviateFull(str, 0, maxWidth) +} + +/* +AbbreviateFull abbreviates a string using ellipses. This will turn the string "Now is the time for all good men" into "...is the time for..." +This function works like Abbreviate(string, int), but allows you to specify a "left edge" offset. Note that this left edge is not +necessarily going to be the leftmost character in the result, or the first character following the ellipses, but it will appear +somewhere in the result. +In no case will it return a string of length greater than maxWidth. + +Parameters: + str - the string to check + offset - left edge of source string + maxWidth - maximum length of result string, must be at least 4 + +Returns: + string - abbreviated string + error - if the width is too small +*/ +func AbbreviateFull(str string, offset int, maxWidth int) (string, error) { + if str == "" { + return "", nil + } + if maxWidth < 4 { + err := fmt.Errorf("stringutils illegal argument: Minimum abbreviation width is 4") + return "", err + } + if len(str) <= maxWidth { + return str, nil + } + if offset > len(str) { + offset = len(str) + } + if len(str)-offset < (maxWidth - 3) { // 15 - 5 < 10 - 3 = 10 < 7 + offset = len(str) - (maxWidth - 3) + } + abrevMarker := "..." 
+ if offset <= 4 { + return str[0:maxWidth-3] + abrevMarker, nil // str.substring(0, maxWidth - 3) + abrevMarker; + } + if maxWidth < 7 { + err := fmt.Errorf("stringutils illegal argument: Minimum abbreviation width with offset is 7") + return "", err + } + if (offset + maxWidth - 3) < len(str) { // 5 + (10-3) < 15 = 12 < 15 + abrevStr, _ := Abbreviate(str[offset:len(str)], (maxWidth - 3)) + return abrevMarker + abrevStr, nil // abrevMarker + abbreviate(str.substring(offset), maxWidth - 3); + } + return abrevMarker + str[(len(str)-(maxWidth-3)):len(str)], nil // abrevMarker + str.substring(str.length() - (maxWidth - 3)); +} + +/* +DeleteWhiteSpace deletes all whitespaces from a string as defined by unicode.IsSpace(rune). +It returns the string without whitespaces. + +Parameter: + str - the string to delete whitespace from, may be nil + +Returns: + the string without whitespaces +*/ +func DeleteWhiteSpace(str string) string { + if str == "" { + return str + } + sz := len(str) + var chs bytes.Buffer + count := 0 + for i := 0; i < sz; i++ { + ch := rune(str[i]) + if !unicode.IsSpace(ch) { + chs.WriteRune(ch) + count++ + } + } + if count == sz { + return str + } + return chs.String() +} + +/* +IndexOfDifference compares two strings, and returns the index at which the strings begin to differ. + +Parameters: + str1 - the first string + str2 - the second string + +Returns: + the index where str1 and str2 begin to differ; -1 if they are equal +*/ +func IndexOfDifference(str1 string, str2 string) int { + if str1 == str2 { + return INDEX_NOT_FOUND + } + if IsEmpty(str1) || IsEmpty(str2) { + return 0 + } + var i int + for i = 0; i < len(str1) && i < len(str2); i++ { + if rune(str1[i]) != rune(str2[i]) { + break + } + } + if i < len(str2) || i < len(str1) { + return i + } + return INDEX_NOT_FOUND +} + +/* +IsBlank checks if a string is whitespace or empty (""). 
Observe the following behavior: + + goutils.IsBlank("") = true + goutils.IsBlank(" ") = true + goutils.IsBlank("bob") = false + goutils.IsBlank(" bob ") = false + +Parameter: + str - the string to check + +Returns: + true - if the string is whitespace or empty ("") +*/ +func IsBlank(str string) bool { + strLen := len(str) + if str == "" || strLen == 0 { + return true + } + for i := 0; i < strLen; i++ { + if unicode.IsSpace(rune(str[i])) == false { + return false + } + } + return true +} + +/* +IndexOf returns the index of the first instance of sub in str, with the search beginning from the +index start point specified. -1 is returned if sub is not present in str. + +An empty string ("") will return -1 (INDEX_NOT_FOUND). A negative start position is treated as zero. +A start position greater than the string length returns -1. + +Parameters: + str - the string to check + sub - the substring to find + start - the start position; negative treated as zero + +Returns: + the first index where the sub string was found (always >= start) +*/ +func IndexOf(str string, sub string, start int) int { + + if start < 0 { + start = 0 + } + + if len(str) < start { + return INDEX_NOT_FOUND + } + + if IsEmpty(str) || IsEmpty(sub) { + return INDEX_NOT_FOUND + } + + partialIndex := strings.Index(str[start:len(str)], sub) + if partialIndex == -1 { + return INDEX_NOT_FOUND + } + return partialIndex + start +} + +// IsEmpty checks if a string is empty (""). Returns true if empty, and false otherwise. +func IsEmpty(str string) bool { + return len(str) == 0 +} + +// Returns either the passed in string, or if the string is empty, the value of defaultStr. +func DefaultString(str string, defaultStr string) string { + if IsEmpty(str) { + return defaultStr + } + return str +} + +// Returns either the passed in string, or if the string is whitespace, empty (""), the value of defaultStr. 
+func DefaultIfBlank(str string, defaultStr string) string { + if IsBlank(str) { + return defaultStr + } + return str +} diff --git a/vendor/github.com/Masterminds/goutils/wordutils.go b/vendor/github.com/Masterminds/goutils/wordutils.go new file mode 100644 index 000000000..034cad8e2 --- /dev/null +++ b/vendor/github.com/Masterminds/goutils/wordutils.go @@ -0,0 +1,357 @@ +/* +Copyright 2014 Alexander Okoli + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package goutils provides utility functions to manipulate strings in various ways. +The code snippets below show examples of how to use goutils. Some functions return +errors while others do not, so usage would vary as a result. + +Example: + + package main + + import ( + "fmt" + "github.com/aokoli/goutils" + ) + + func main() { + + // EXAMPLE 1: A goutils function which returns no errors + fmt.Println (goutils.Initials("John Doe Foo")) // Prints out "JDF" + + + + // EXAMPLE 2: A goutils function which returns an error + rand1, err1 := goutils.Random (-1, 0, 0, true, true) + + if err1 != nil { + fmt.Println(err1) // Prints out error message because -1 was entered as the first parameter in goutils.Random(...) + } else { + fmt.Println(rand1) + } + } +*/ +package goutils + +import ( + "bytes" + "strings" + "unicode" +) + +// VERSION indicates the current version of goutils +const VERSION = "1.0.0" + +/* +Wrap wraps a single line of text, identifying words by ' '. +New lines will be separated by '\n'. 
Very long words, such as URLs will not be wrapped. +Leading spaces on a new line are stripped. Trailing spaces are not stripped. + +Parameters: + str - the string to be word wrapped + wrapLength - the column (a column can fit only one character) to wrap the words at, less than 1 is treated as 1 + +Returns: + a line with newlines inserted +*/ +func Wrap(str string, wrapLength int) string { + return WrapCustom(str, wrapLength, "", false) +} + +/* +WrapCustom wraps a single line of text, identifying words by ' '. +Leading spaces on a new line are stripped. Trailing spaces are not stripped. + +Parameters: + str - the string to be word wrapped + wrapLength - the column number (a column can fit only one character) to wrap the words at, less than 1 is treated as 1 + newLineStr - the string to insert for a new line, "" uses '\n' + wrapLongWords - true if long words (such as URLs) should be wrapped + +Returns: + a line with newlines inserted +*/ +func WrapCustom(str string, wrapLength int, newLineStr string, wrapLongWords bool) string { + + if str == "" { + return "" + } + if newLineStr == "" { + newLineStr = "\n" // TODO Assumes "\n" is seperator. 
Explore SystemUtils.LINE_SEPARATOR from Apache Commons + } + if wrapLength < 1 { + wrapLength = 1 + } + + inputLineLength := len(str) + offset := 0 + + var wrappedLine bytes.Buffer + + for inputLineLength-offset > wrapLength { + + if rune(str[offset]) == ' ' { + offset++ + continue + } + + end := wrapLength + offset + 1 + spaceToWrapAt := strings.LastIndex(str[offset:end], " ") + offset + + if spaceToWrapAt >= offset { + // normal word (not longer than wrapLength) + wrappedLine.WriteString(str[offset:spaceToWrapAt]) + wrappedLine.WriteString(newLineStr) + offset = spaceToWrapAt + 1 + + } else { + // long word or URL + if wrapLongWords { + end := wrapLength + offset + // long words are wrapped one line at a time + wrappedLine.WriteString(str[offset:end]) + wrappedLine.WriteString(newLineStr) + offset += wrapLength + } else { + // long words aren't wrapped, just extended beyond limit + end := wrapLength + offset + index := strings.IndexRune(str[end:len(str)], ' ') + if index == -1 { + wrappedLine.WriteString(str[offset:len(str)]) + offset = inputLineLength + } else { + spaceToWrapAt = index + end + wrappedLine.WriteString(str[offset:spaceToWrapAt]) + wrappedLine.WriteString(newLineStr) + offset = spaceToWrapAt + 1 + } + } + } + } + + wrappedLine.WriteString(str[offset:len(str)]) + + return wrappedLine.String() + +} + +/* +Capitalize capitalizes all the delimiter separated words in a string. Only the first letter of each word is changed. +To convert the rest of each word to lowercase at the same time, use CapitalizeFully(str string, delimiters ...rune). +The delimiters represent a set of characters understood to separate words. The first string character +and the first non-delimiter character after a delimiter will be capitalized. A "" input string returns "". +Capitalization uses the Unicode title case, normally equivalent to upper case. 
+ +Parameters: + str - the string to capitalize + delimiters - set of characters to determine capitalization, exclusion of this parameter means whitespace would be delimeter + +Returns: + capitalized string +*/ +func Capitalize(str string, delimiters ...rune) string { + + var delimLen int + + if delimiters == nil { + delimLen = -1 + } else { + delimLen = len(delimiters) + } + + if str == "" || delimLen == 0 { + return str + } + + buffer := []rune(str) + capitalizeNext := true + for i := 0; i < len(buffer); i++ { + ch := buffer[i] + if isDelimiter(ch, delimiters...) { + capitalizeNext = true + } else if capitalizeNext { + buffer[i] = unicode.ToTitle(ch) + capitalizeNext = false + } + } + return string(buffer) + +} + +/* +CapitalizeFully converts all the delimiter separated words in a string into capitalized words, that is each word is made up of a +titlecase character and then a series of lowercase characters. The delimiters represent a set of characters understood +to separate words. The first string character and the first non-delimiter character after a delimiter will be capitalized. +Capitalization uses the Unicode title case, normally equivalent to upper case. + +Parameters: + str - the string to capitalize fully + delimiters - set of characters to determine capitalization, exclusion of this parameter means whitespace would be delimeter + +Returns: + capitalized string +*/ +func CapitalizeFully(str string, delimiters ...rune) string { + + var delimLen int + + if delimiters == nil { + delimLen = -1 + } else { + delimLen = len(delimiters) + } + + if str == "" || delimLen == 0 { + return str + } + str = strings.ToLower(str) + return Capitalize(str, delimiters...) +} + +/* +Uncapitalize uncapitalizes all the whitespace separated words in a string. Only the first letter of each word is changed. +The delimiters represent a set of characters understood to separate words. 
The first string character and the first non-delimiter +character after a delimiter will be uncapitalized. Whitespace is defined by unicode.IsSpace(char). + +Parameters: + str - the string to uncapitalize fully + delimiters - set of characters to determine capitalization, exclusion of this parameter means whitespace would be delimeter + +Returns: + uncapitalized string +*/ +func Uncapitalize(str string, delimiters ...rune) string { + + var delimLen int + + if delimiters == nil { + delimLen = -1 + } else { + delimLen = len(delimiters) + } + + if str == "" || delimLen == 0 { + return str + } + + buffer := []rune(str) + uncapitalizeNext := true // TODO Always makes capitalize/un apply to first char. + for i := 0; i < len(buffer); i++ { + ch := buffer[i] + if isDelimiter(ch, delimiters...) { + uncapitalizeNext = true + } else if uncapitalizeNext { + buffer[i] = unicode.ToLower(ch) + uncapitalizeNext = false + } + } + return string(buffer) +} + +/* +SwapCase swaps the case of a string using a word based algorithm. + +Conversion algorithm: + + Upper case character converts to Lower case + Title case character converts to Lower case + Lower case character after Whitespace or at start converts to Title case + Other Lower case character converts to Upper case + Whitespace is defined by unicode.IsSpace(char). 
+ +Parameters: + str - the string to swap case + +Returns: + the changed string +*/ +func SwapCase(str string) string { + if str == "" { + return str + } + buffer := []rune(str) + + whitespace := true + + for i := 0; i < len(buffer); i++ { + ch := buffer[i] + if unicode.IsUpper(ch) { + buffer[i] = unicode.ToLower(ch) + whitespace = false + } else if unicode.IsTitle(ch) { + buffer[i] = unicode.ToLower(ch) + whitespace = false + } else if unicode.IsLower(ch) { + if whitespace { + buffer[i] = unicode.ToTitle(ch) + whitespace = false + } else { + buffer[i] = unicode.ToUpper(ch) + } + } else { + whitespace = unicode.IsSpace(ch) + } + } + return string(buffer) +} + +/* +Initials extracts the initial letters from each word in the string. The first letter of the string and all first +letters after the defined delimiters are returned as a new string. Their case is not changed. If the delimiters +parameter is excluded, then Whitespace is used. Whitespace is defined by unicode.IsSpacea(char). An empty delimiter array returns an empty string. + +Parameters: + str - the string to get initials from + delimiters - set of characters to determine words, exclusion of this parameter means whitespace would be delimeter +Returns: + string of initial letters +*/ +func Initials(str string, delimiters ...rune) string { + if str == "" { + return str + } + if delimiters != nil && len(delimiters) == 0 { + return "" + } + strLen := len(str) + var buf bytes.Buffer + lastWasGap := true + for i := 0; i < strLen; i++ { + ch := rune(str[i]) + + if isDelimiter(ch, delimiters...) 
{ + lastWasGap = true + } else if lastWasGap { + buf.WriteRune(ch) + lastWasGap = false + } + } + return buf.String() +} + +// private function (lower case func name) +func isDelimiter(ch rune, delimiters ...rune) bool { + if delimiters == nil { + return unicode.IsSpace(ch) + } + for _, delimiter := range delimiters { + if ch == delimiter { + return true + } + } + return false +} diff --git a/vendor/github.com/Masterminds/semver/v3/.gitignore b/vendor/github.com/Masterminds/semver/v3/.gitignore new file mode 100644 index 000000000..6b061e617 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/.gitignore @@ -0,0 +1 @@ +_fuzz/ \ No newline at end of file diff --git a/vendor/github.com/Masterminds/semver/v3/.golangci.yml b/vendor/github.com/Masterminds/semver/v3/.golangci.yml new file mode 100644 index 000000000..fdbdf1448 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/.golangci.yml @@ -0,0 +1,26 @@ +run: + deadline: 2m + +linters: + disable-all: true + enable: + - deadcode + - dupl + - errcheck + - gofmt + - goimports + - golint + - gosimple + - govet + - ineffassign + - misspell + - nakedret + - structcheck + - unused + - varcheck + +linters-settings: + gofmt: + simplify: true + dupl: + threshold: 400 diff --git a/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md b/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md new file mode 100644 index 000000000..1f90c38d2 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md @@ -0,0 +1,194 @@ +# Changelog + +## 3.1.1 (2020-11-23) + +### Fixed + +- #158: Fixed issue with generated regex operation order that could cause problem + +## 3.1.0 (2020-04-15) + +### Added + +- #131: Add support for serializing/deserializing SQL (thanks @ryancurrah) + +### Changed + +- #148: More accurate validation messages on constraints + +## 3.0.3 (2019-12-13) + +### Fixed + +- #141: Fixed issue with <= comparison + +## 3.0.2 (2019-11-14) + +### Fixed + +- #134: Fixed broken constraint checking with ^0.0 
(thanks @krmichelos) + +## 3.0.1 (2019-09-13) + +### Fixed + +- #125: Fixes issue with module path for v3 + +## 3.0.0 (2019-09-12) + +This is a major release of the semver package which includes API changes. The Go +API is compatible with ^1. The Go API was not changed because many people are using +`go get` without Go modules for their applications and API breaking changes cause +errors which we have or would need to support. + +The changes in this release are the handling based on the data passed into the +functions. These are described in the added and changed sections below. + +### Added + +- StrictNewVersion function. This is similar to NewVersion but will return an + error if the version passed in is not a strict semantic version. For example, + 1.2.3 would pass but v1.2.3 or 1.2 would fail because they are not strictly + speaking semantic versions. This function is faster, performs fewer operations, + and uses fewer allocations than NewVersion. +- Fuzzing has been performed on NewVersion, StrictNewVersion, and NewConstraint. + The Makefile contains the operations used. For more information on you can start + on Wikipedia at https://en.wikipedia.org/wiki/Fuzzing +- Now using Go modules + +### Changed + +- NewVersion has proper prerelease and metadata validation with error messages + to signal an issue with either of them +- ^ now operates using a similar set of rules to npm/js and Rust/Cargo. If the + version is >=1 the ^ ranges works the same as v1. For major versions of 0 the + rules have changed. The minor version is treated as the stable version unless + a patch is specified and then it is equivalent to =. One difference from npm/js + is that prereleases there are only to a specific version (e.g. 1.2.3). + Prereleases here look over multiple versions and follow semantic version + ordering rules. This pattern now follows along with the expected and requested + handling of this packaged by numerous users. 
+ +## 1.5.0 (2019-09-11) + +### Added + +- #103: Add basic fuzzing for `NewVersion()` (thanks @jesse-c) + +### Changed + +- #82: Clarify wildcard meaning in range constraints and update tests for it (thanks @greysteil) +- #83: Clarify caret operator range for pre-1.0.0 dependencies (thanks @greysteil) +- #72: Adding docs comment pointing to vert for a cli +- #71: Update the docs on pre-release comparator handling +- #89: Test with new go versions (thanks @thedevsaddam) +- #87: Added $ to ValidPrerelease for better validation (thanks @jeremycarroll) + +### Fixed + +- #78: Fix unchecked error in example code (thanks @ravron) +- #70: Fix the handling of pre-releases and the 0.0.0 release edge case +- #97: Fixed copyright file for proper display on GitHub +- #107: Fix handling prerelease when sorting alphanum and num +- #109: Fixed where Validate sometimes returns wrong message on error + +## 1.4.2 (2018-04-10) + +### Changed + +- #72: Updated the docs to point to vert for a console appliaction +- #71: Update the docs on pre-release comparator handling + +### Fixed + +- #70: Fix the handling of pre-releases and the 0.0.0 release edge case + +## 1.4.1 (2018-04-02) + +### Fixed + +- Fixed #64: Fix pre-release precedence issue (thanks @uudashr) + +## 1.4.0 (2017-10-04) + +### Changed + +- #61: Update NewVersion to parse ints with a 64bit int size (thanks @zknill) + +## 1.3.1 (2017-07-10) + +### Fixed + +- Fixed #57: number comparisons in prerelease sometimes inaccurate + +## 1.3.0 (2017-05-02) + +### Added + +- #45: Added json (un)marshaling support (thanks @mh-cbon) +- Stability marker. 
See https://masterminds.github.io/stability/ + +### Fixed + +- #51: Fix handling of single digit tilde constraint (thanks @dgodd) + +### Changed + +- #55: The godoc icon moved from png to svg + +## 1.2.3 (2017-04-03) + +### Fixed + +- #46: Fixed 0.x.x and 0.0.x in constraints being treated as * + +## Release 1.2.2 (2016-12-13) + +### Fixed + +- #34: Fixed issue where hyphen range was not working with pre-release parsing. + +## Release 1.2.1 (2016-11-28) + +### Fixed + +- #24: Fixed edge case issue where constraint "> 0" does not handle "0.0.1-alpha" + properly. + +## Release 1.2.0 (2016-11-04) + +### Added + +- #20: Added MustParse function for versions (thanks @adamreese) +- #15: Added increment methods on versions (thanks @mh-cbon) + +### Fixed + +- Issue #21: Per the SemVer spec (section 9) a pre-release is unstable and + might not satisfy the intended compatibility. The change here ignores pre-releases + on constraint checks (e.g., ~ or ^) when a pre-release is not part of the + constraint. For example, `^1.2.3` will ignore pre-releases while + `^1.2.3-alpha` will include them. + +## Release 1.1.1 (2016-06-30) + +### Changed + +- Issue #9: Speed up version comparison performance (thanks @sdboyer) +- Issue #8: Added benchmarks (thanks @sdboyer) +- Updated Go Report Card URL to new location +- Updated Readme to add code snippet formatting (thanks @mh-cbon) +- Updating tagging to v[SemVer] structure for compatibility with other tools. + +## Release 1.1.0 (2016-03-11) + +- Issue #2: Implemented validation to provide reasons a versions failed a + constraint. + +## Release 1.0.1 (2015-12-31) + +- Fixed #1: * constraint failing on valid versions. 
+ +## Release 1.0.0 (2015-10-20) + +- Initial release diff --git a/vendor/github.com/Masterminds/semver/v3/LICENSE.txt b/vendor/github.com/Masterminds/semver/v3/LICENSE.txt new file mode 100644 index 000000000..9ff7da9c4 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (C) 2014-2019, Matt Butcher and Matt Farina + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/vendor/github.com/Masterminds/semver/v3/Makefile b/vendor/github.com/Masterminds/semver/v3/Makefile new file mode 100644 index 000000000..eac19178f --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/Makefile @@ -0,0 +1,37 @@ +GOPATH=$(shell go env GOPATH) +GOLANGCI_LINT=$(GOPATH)/bin/golangci-lint +GOFUZZBUILD = $(GOPATH)/bin/go-fuzz-build +GOFUZZ = $(GOPATH)/bin/go-fuzz + +.PHONY: lint +lint: $(GOLANGCI_LINT) + @echo "==> Linting codebase" + @$(GOLANGCI_LINT) run + +.PHONY: test +test: + @echo "==> Running tests" + GO111MODULE=on go test -v + +.PHONY: test-cover +test-cover: + @echo "==> Running Tests with coverage" + GO111MODULE=on go test -cover . + +.PHONY: fuzz +fuzz: $(GOFUZZBUILD) $(GOFUZZ) + @echo "==> Fuzz testing" + $(GOFUZZBUILD) + $(GOFUZZ) -workdir=_fuzz + +$(GOLANGCI_LINT): + # Install golangci-lint. The configuration for it is in the .golangci.yml + # file in the root of the repository + echo ${GOPATH} + curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(GOPATH)/bin v1.17.1 + +$(GOFUZZBUILD): + cd / && go get -u github.com/dvyukov/go-fuzz/go-fuzz-build + +$(GOFUZZ): + cd / && go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-dep \ No newline at end of file diff --git a/vendor/github.com/Masterminds/semver/v3/README.md b/vendor/github.com/Masterminds/semver/v3/README.md new file mode 100644 index 000000000..d8f54dcbd --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/README.md @@ -0,0 +1,244 @@ +# SemVer + +The `semver` package provides the ability to work with [Semantic Versions](http://semver.org) in Go. 
Specifically it provides the ability to: + +* Parse semantic versions +* Sort semantic versions +* Check if a semantic version fits within a set of constraints +* Optionally work with a `v` prefix + +[![Stability: +Active](https://masterminds.github.io/stability/active.svg)](https://masterminds.github.io/stability/active.html) +[![](https://github.com/Masterminds/semver/workflows/Tests/badge.svg)](https://github.com/Masterminds/semver/actions) +[![GoDoc](https://img.shields.io/static/v1?label=godoc&message=reference&color=blue)](https://pkg.go.dev/github.com/Masterminds/semver/v3) +[![Go Report Card](https://goreportcard.com/badge/github.com/Masterminds/semver)](https://goreportcard.com/report/github.com/Masterminds/semver) + +If you are looking for a command line tool for version comparisons please see +[vert](https://github.com/Masterminds/vert) which uses this library. + +## Package Versions + +There are three major versions fo the `semver` package. + +* 3.x.x is the new stable and active version. This version is focused on constraint + compatibility for range handling in other tools from other languages. It has + a similar API to the v1 releases. The development of this version is on the master + branch. The documentation for this version is below. +* 2.x was developed primarily for [dep](https://github.com/golang/dep). There are + no tagged releases and the development was performed by [@sdboyer](https://github.com/sdboyer). + There are API breaking changes from v1. This version lives on the [2.x branch](https://github.com/Masterminds/semver/tree/2.x). +* 1.x.x is the most widely used version with numerous tagged releases. This is the + previous stable and is still maintained for bug fixes. The development, to fix + bugs, occurs on the release-1 branch. You can read the documentation [here](https://github.com/Masterminds/semver/blob/release-1/README.md). + +## Parsing Semantic Versions + +There are two functions that can parse semantic versions. 
The `StrictNewVersion` +function only parses valid version 2 semantic versions as outlined in the +specification. The `NewVersion` function attempts to coerce a version into a +semantic version and parse it. For example, if there is a leading v or a version +listed without all 3 parts (e.g. `v1.2`) it will attempt to coerce it into a valid +semantic version (e.g., 1.2.0). In both cases a `Version` object is returned +that can be sorted, compared, and used in constraints. + +When parsing a version an error is returned if there is an issue parsing the +version. For example, + + v, err := semver.NewVersion("1.2.3-beta.1+build345") + +The version object has methods to get the parts of the version, compare it to +other versions, convert the version back into a string, and get the original +string. Getting the original string is useful if the semantic version was coerced +into a valid form. + +## Sorting Semantic Versions + +A set of versions can be sorted using the `sort` package from the standard library. +For example, + +```go +raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",} +vs := make([]*semver.Version, len(raw)) +for i, r := range raw { + v, err := semver.NewVersion(r) + if err != nil { + t.Errorf("Error parsing version: %s", err) + } + + vs[i] = v +} + +sort.Sort(semver.Collection(vs)) +``` + +## Checking Version Constraints + +There are two methods for comparing versions. One uses comparison methods on +`Version` instances and the other uses `Constraints`. There are some important +differences to notes between these two methods of comparison. + +1. When two versions are compared using functions such as `Compare`, `LessThan`, + and others it will follow the specification and always include prereleases + within the comparison. It will provide an answer that is valid with the + comparison section of the spec at https://semver.org/#spec-item-11 +2. 
When constraint checking is used for checks or validation it will follow a + different set of rules that are common for ranges with tools like npm/js + and Rust/Cargo. This includes considering prereleases to be invalid if the + ranges does not include one. If you want to have it include pre-releases a + simple solution is to include `-0` in your range. +3. Constraint ranges can have some complex rules including the shorthand use of + ~ and ^. For more details on those see the options below. + +There are differences between the two methods or checking versions because the +comparison methods on `Version` follow the specification while comparison ranges +are not part of the specification. Different packages and tools have taken it +upon themselves to come up with range rules. This has resulted in differences. +For example, npm/js and Cargo/Rust follow similar patterns while PHP has a +different pattern for ^. The comparison features in this package follow the +npm/js and Cargo/Rust lead because applications using it have followed similar +patters with their versions. + +Checking a version against version constraints is one of the most featureful +parts of the package. + +```go +c, err := semver.NewConstraint(">= 1.2.3") +if err != nil { + // Handle constraint not being parsable. +} + +v, err := semver.NewVersion("1.3") +if err != nil { + // Handle version not being parsable. +} +// Check if the version meets the constraints. The a variable will be true. +a := c.Check(v) +``` + +### Basic Comparisons + +There are two elements to the comparisons. First, a comparison string is a list +of space or comma separated AND comparisons. These are then separated by || (OR) +comparisons. For example, `">= 1.2 < 3.0.0 || >= 4.2.3"` is looking for a +comparison that's greater than or equal to 1.2 and less than 3.0.0 or is +greater than or equal to 4.2.3. 
+ +The basic comparisons are: + +* `=`: equal (aliased to no operator) +* `!=`: not equal +* `>`: greater than +* `<`: less than +* `>=`: greater than or equal to +* `<=`: less than or equal to + +### Working With Prerelease Versions + +Pre-releases, for those not familiar with them, are used for software releases +prior to stable or generally available releases. Examples of prereleases include +development, alpha, beta, and release candidate releases. A prerelease may be +a version such as `1.2.3-beta.1` while the stable release would be `1.2.3`. In the +order of precedence, prereleases come before their associated releases. In this +example `1.2.3-beta.1 < 1.2.3`. + +According to the Semantic Version specification prereleases may not be +API compliant with their release counterpart. It says, + +> A pre-release version indicates that the version is unstable and might not satisfy the intended compatibility requirements as denoted by its associated normal version. + +SemVer comparisons using constraints without a prerelease comparator will skip +prerelease versions. For example, `>=1.2.3` will skip prereleases when looking +at a list of releases while `>=1.2.3-0` will evaluate and find prereleases. + +The reason for the `0` as a pre-release version in the example comparison is +because pre-releases can only contain ASCII alphanumerics and hyphens (along with +`.` separators), per the spec. Sorting happens in ASCII sort order, again per the +spec. The lowest character is a `0` in ASCII sort order +(see an [ASCII Table](http://www.asciitable.com/)) + +Understanding ASCII sort ordering is important because A-Z comes before a-z. That +means `>=1.2.3-BETA` will return `1.2.3-alpha`. What you might expect from case +sensitivity doesn't apply here. This is due to ASCII sort ordering which is what +the spec specifies. + +### Hyphen Range Comparisons + +There are multiple methods to handle ranges and the first is hyphens ranges. 
+These look like: + +* `1.2 - 1.4.5` which is equivalent to `>= 1.2 <= 1.4.5` +* `2.3.4 - 4.5` which is equivalent to `>= 2.3.4 <= 4.5` + +### Wildcards In Comparisons + +The `x`, `X`, and `*` characters can be used as a wildcard character. This works +for all comparison operators. When used on the `=` operator it falls +back to the patch level comparison (see tilde below). For example, + +* `1.2.x` is equivalent to `>= 1.2.0, < 1.3.0` +* `>= 1.2.x` is equivalent to `>= 1.2.0` +* `<= 2.x` is equivalent to `< 3` +* `*` is equivalent to `>= 0.0.0` + +### Tilde Range Comparisons (Patch) + +The tilde (`~`) comparison operator is for patch level ranges when a minor +version is specified and major level changes when the minor number is missing. +For example, + +* `~1.2.3` is equivalent to `>= 1.2.3, < 1.3.0` +* `~1` is equivalent to `>= 1, < 2` +* `~2.3` is equivalent to `>= 2.3, < 2.4` +* `~1.2.x` is equivalent to `>= 1.2.0, < 1.3.0` +* `~1.x` is equivalent to `>= 1, < 2` + +### Caret Range Comparisons (Major) + +The caret (`^`) comparison operator is for major level changes once a stable +(1.0.0) release has occurred. Prior to a 1.0.0 release the minor versions acts +as the API stability level. This is useful when comparisons of API versions as a +major change is API breaking. For example, + +* `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0` +* `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0` +* `^2.3` is equivalent to `>= 2.3, < 3` +* `^2.x` is equivalent to `>= 2.0.0, < 3` +* `^0.2.3` is equivalent to `>=0.2.3 <0.3.0` +* `^0.2` is equivalent to `>=0.2.0 <0.3.0` +* `^0.0.3` is equivalent to `>=0.0.3 <0.0.4` +* `^0.0` is equivalent to `>=0.0.0 <0.1.0` +* `^0` is equivalent to `>=0.0.0 <1.0.0` + +## Validation + +In addition to testing a version against a constraint, a version can be validated +against a constraint. When validation fails a slice of errors containing why a +version didn't meet the constraint is returned. 
For example, + +```go +c, err := semver.NewConstraint("<= 1.2.3, >= 1.4") +if err != nil { + // Handle constraint not being parseable. +} + +v, err := semver.NewVersion("1.3") +if err != nil { + // Handle version not being parseable. +} + +// Validate a version against a constraint. +a, msgs := c.Validate(v) +// a is false +for _, m := range msgs { + fmt.Println(m) + + // Loops over the errors which would read + // "1.3 is greater than 1.2.3" + // "1.3 is less than 1.4" +} +``` + +## Contribute + +If you find an issue or want to contribute please file an [issue](https://github.com/Masterminds/semver/issues) +or [create a pull request](https://github.com/Masterminds/semver/pulls). diff --git a/vendor/github.com/Masterminds/semver/v3/collection.go b/vendor/github.com/Masterminds/semver/v3/collection.go new file mode 100644 index 000000000..a78235895 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/collection.go @@ -0,0 +1,24 @@ +package semver + +// Collection is a collection of Version instances and implements the sort +// interface. See the sort package for more details. +// https://golang.org/pkg/sort/ +type Collection []*Version + +// Len returns the length of a collection. The number of Version instances +// on the slice. +func (c Collection) Len() int { + return len(c) +} + +// Less is needed for the sort interface to compare two Version objects on the +// slice. If checks if one is less than the other. +func (c Collection) Less(i, j int) bool { + return c[i].LessThan(c[j]) +} + +// Swap is needed for the sort interface to replace the Version objects +// at two different positions in the slice. 
+func (c Collection) Swap(i, j int) { + c[i], c[j] = c[j], c[i] +} diff --git a/vendor/github.com/Masterminds/semver/v3/constraints.go b/vendor/github.com/Masterminds/semver/v3/constraints.go new file mode 100644 index 000000000..547613f04 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/constraints.go @@ -0,0 +1,568 @@ +package semver + +import ( + "bytes" + "errors" + "fmt" + "regexp" + "strings" +) + +// Constraints is one or more constraint that a semantic version can be +// checked against. +type Constraints struct { + constraints [][]*constraint +} + +// NewConstraint returns a Constraints instance that a Version instance can +// be checked against. If there is a parse error it will be returned. +func NewConstraint(c string) (*Constraints, error) { + + // Rewrite - ranges into a comparison operation. + c = rewriteRange(c) + + ors := strings.Split(c, "||") + or := make([][]*constraint, len(ors)) + for k, v := range ors { + + // TODO: Find a way to validate and fetch all the constraints in a simpler form + + // Validate the segment + if !validConstraintRegex.MatchString(v) { + return nil, fmt.Errorf("improper constraint: %s", v) + } + + cs := findConstraintRegex.FindAllString(v, -1) + if cs == nil { + cs = append(cs, v) + } + result := make([]*constraint, len(cs)) + for i, s := range cs { + pc, err := parseConstraint(s) + if err != nil { + return nil, err + } + + result[i] = pc + } + or[k] = result + } + + o := &Constraints{constraints: or} + return o, nil +} + +// Check tests if a version satisfies the constraints. +func (cs Constraints) Check(v *Version) bool { + // TODO(mattfarina): For v4 of this library consolidate the Check and Validate + // functions as the underlying functions make that possible now. 
+ // loop over the ORs and check the inner ANDs + for _, o := range cs.constraints { + joy := true + for _, c := range o { + if check, _ := c.check(v); !check { + joy = false + break + } + } + + if joy { + return true + } + } + + return false +} + +// Validate checks if a version satisfies a constraint. If not a slice of +// reasons for the failure are returned in addition to a bool. +func (cs Constraints) Validate(v *Version) (bool, []error) { + // loop over the ORs and check the inner ANDs + var e []error + + // Capture the prerelease message only once. When it happens the first time + // this var is marked + var prerelesase bool + for _, o := range cs.constraints { + joy := true + for _, c := range o { + // Before running the check handle the case there the version is + // a prerelease and the check is not searching for prereleases. + if c.con.pre == "" && v.pre != "" { + if !prerelesase { + em := fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + e = append(e, em) + prerelesase = true + } + joy = false + + } else { + + if _, err := c.check(v); err != nil { + e = append(e, err) + joy = false + } + } + } + + if joy { + return true, []error{} + } + } + + return false, e +} + +func (cs Constraints) String() string { + buf := make([]string, len(cs.constraints)) + var tmp bytes.Buffer + + for k, v := range cs.constraints { + tmp.Reset() + vlen := len(v) + for kk, c := range v { + tmp.WriteString(c.string()) + + // Space separate the AND conditions + if vlen > 1 && kk < vlen-1 { + tmp.WriteString(" ") + } + } + buf[k] = tmp.String() + } + + return strings.Join(buf, " || ") +} + +var constraintOps map[string]cfunc +var constraintRegex *regexp.Regexp +var constraintRangeRegex *regexp.Regexp + +// Used to find individual constraints within a multi-constraint string +var findConstraintRegex *regexp.Regexp + +// Used to validate an segment of ANDs is valid +var validConstraintRegex *regexp.Regexp + +const cvRegex string = 
`v?([0-9|x|X|\*]+)(\.[0-9|x|X|\*]+)?(\.[0-9|x|X|\*]+)?` + + `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + + `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + +func init() { + constraintOps = map[string]cfunc{ + "": constraintTildeOrEqual, + "=": constraintTildeOrEqual, + "!=": constraintNotEqual, + ">": constraintGreaterThan, + "<": constraintLessThan, + ">=": constraintGreaterThanEqual, + "=>": constraintGreaterThanEqual, + "<=": constraintLessThanEqual, + "=<": constraintLessThanEqual, + "~": constraintTilde, + "~>": constraintTilde, + "^": constraintCaret, + } + + ops := `=||!=|>|<|>=|=>|<=|=<|~|~>|\^` + + constraintRegex = regexp.MustCompile(fmt.Sprintf( + `^\s*(%s)\s*(%s)\s*$`, + ops, + cvRegex)) + + constraintRangeRegex = regexp.MustCompile(fmt.Sprintf( + `\s*(%s)\s+-\s+(%s)\s*`, + cvRegex, cvRegex)) + + findConstraintRegex = regexp.MustCompile(fmt.Sprintf( + `(%s)\s*(%s)`, + ops, + cvRegex)) + + validConstraintRegex = regexp.MustCompile(fmt.Sprintf( + `^(\s*(%s)\s*(%s)\s*\,?)+$`, + ops, + cvRegex)) +} + +// An individual constraint +type constraint struct { + // The version used in the constraint check. For example, if a constraint + // is '<= 2.0.0' the con a version instance representing 2.0.0. 
+ con *Version + + // The original parsed version (e.g., 4.x from != 4.x) + orig string + + // The original operator for the constraint + origfunc string + + // When an x is used as part of the version (e.g., 1.x) + minorDirty bool + dirty bool + patchDirty bool +} + +// Check if a version meets the constraint +func (c *constraint) check(v *Version) (bool, error) { + return constraintOps[c.origfunc](v, c) +} + +// String prints an individual constraint into a string +func (c *constraint) string() string { + return c.origfunc + c.orig +} + +type cfunc func(v *Version, c *constraint) (bool, error) + +func parseConstraint(c string) (*constraint, error) { + if len(c) > 0 { + m := constraintRegex.FindStringSubmatch(c) + if m == nil { + return nil, fmt.Errorf("improper constraint: %s", c) + } + + cs := &constraint{ + orig: m[2], + origfunc: m[1], + } + + ver := m[2] + minorDirty := false + patchDirty := false + dirty := false + if isX(m[3]) || m[3] == "" { + ver = "0.0.0" + dirty = true + } else if isX(strings.TrimPrefix(m[4], ".")) || m[4] == "" { + minorDirty = true + dirty = true + ver = fmt.Sprintf("%s.0.0%s", m[3], m[6]) + } else if isX(strings.TrimPrefix(m[5], ".")) || m[5] == "" { + dirty = true + patchDirty = true + ver = fmt.Sprintf("%s%s.0%s", m[3], m[4], m[6]) + } + + con, err := NewVersion(ver) + if err != nil { + + // The constraintRegex should catch any regex parsing errors. So, + // we should never get here. + return nil, errors.New("constraint Parser Error") + } + + cs.con = con + cs.minorDirty = minorDirty + cs.patchDirty = patchDirty + cs.dirty = dirty + + return cs, nil + } + + // The rest is the special case where an empty string was passed in which + // is equivalent to * or >=0.0.0 + con, err := StrictNewVersion("0.0.0") + if err != nil { + + // The constraintRegex should catch any regex parsing errors. So, + // we should never get here. 
+ return nil, errors.New("constraint Parser Error") + } + + cs := &constraint{ + con: con, + orig: c, + origfunc: "", + minorDirty: false, + patchDirty: false, + dirty: true, + } + return cs, nil +} + +// Constraint functions +func constraintNotEqual(v *Version, c *constraint) (bool, error) { + if c.dirty { + + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + if c.con.Major() != v.Major() { + return true, nil + } + if c.con.Minor() != v.Minor() && !c.minorDirty { + return true, nil + } else if c.minorDirty { + return false, fmt.Errorf("%s is equal to %s", v, c.orig) + } else if c.con.Patch() != v.Patch() && !c.patchDirty { + return true, nil + } else if c.patchDirty { + // Need to handle prereleases if present + if v.Prerelease() != "" || c.con.Prerelease() != "" { + eq := comparePrerelease(v.Prerelease(), c.con.Prerelease()) != 0 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is equal to %s", v, c.orig) + } + return false, fmt.Errorf("%s is equal to %s", v, c.orig) + } + } + + eq := v.Equal(c.con) + if eq { + return false, fmt.Errorf("%s is equal to %s", v, c.orig) + } + + return true, nil +} + +func constraintGreaterThan(v *Version, c *constraint) (bool, error) { + + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. 
+ if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + var eq bool + + if !c.dirty { + eq = v.Compare(c.con) == 1 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig) + } + + if v.Major() > c.con.Major() { + return true, nil + } else if v.Major() < c.con.Major() { + return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig) + } else if c.minorDirty { + // This is a range case such as >11. When the version is something like + // 11.1.0 is it not > 11. For that we would need 12 or higher + return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig) + } else if c.patchDirty { + // This is for ranges such as >11.1. A version of 11.1.1 is not greater + // which one of 11.2.1 is greater + eq = v.Minor() > c.con.Minor() + if eq { + return true, nil + } + return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig) + } + + // If we have gotten here we are not comparing pre-preleases and can use the + // Compare function to accomplish that. + eq = v.Compare(c.con) == 1 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig) +} + +func constraintLessThan(v *Version, c *constraint) (bool, error) { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. 
+ if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + eq := v.Compare(c.con) < 0 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is greater than or equal to %s", v, c.orig) +} + +func constraintGreaterThanEqual(v *Version, c *constraint) (bool, error) { + + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + eq := v.Compare(c.con) >= 0 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is less than %s", v, c.orig) +} + +func constraintLessThanEqual(v *Version, c *constraint) (bool, error) { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. 
+ if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + var eq bool + + if !c.dirty { + eq = v.Compare(c.con) <= 0 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is greater than %s", v, c.orig) + } + + if v.Major() > c.con.Major() { + return false, fmt.Errorf("%s is greater than %s", v, c.orig) + } else if v.Major() == c.con.Major() && v.Minor() > c.con.Minor() && !c.minorDirty { + return false, fmt.Errorf("%s is greater than %s", v, c.orig) + } + + return true, nil +} + +// ~*, ~>* --> >= 0.0.0 (any) +// ~2, ~2.x, ~2.x.x, ~>2, ~>2.x ~>2.x.x --> >=2.0.0, <3.0.0 +// ~2.0, ~2.0.x, ~>2.0, ~>2.0.x --> >=2.0.0, <2.1.0 +// ~1.2, ~1.2.x, ~>1.2, ~>1.2.x --> >=1.2.0, <1.3.0 +// ~1.2.3, ~>1.2.3 --> >=1.2.3, <1.3.0 +// ~1.2.0, ~>1.2.0 --> >=1.2.0, <1.3.0 +func constraintTilde(v *Version, c *constraint) (bool, error) { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + if v.LessThan(c.con) { + return false, fmt.Errorf("%s is less than %s", v, c.orig) + } + + // ~0.0.0 is a special case where all constraints are accepted. It's + // equivalent to >= 0.0.0. + if c.con.Major() == 0 && c.con.Minor() == 0 && c.con.Patch() == 0 && + !c.minorDirty && !c.patchDirty { + return true, nil + } + + if v.Major() != c.con.Major() { + return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig) + } + + if v.Minor() != c.con.Minor() && !c.minorDirty { + return false, fmt.Errorf("%s does not have same major and minor version as %s", v, c.orig) + } + + return true, nil +} + +// When there is a .x (dirty) status it automatically opts in to ~. 
Otherwise +// it's a straight = +func constraintTildeOrEqual(v *Version, c *constraint) (bool, error) { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + if c.dirty { + return constraintTilde(v, c) + } + + eq := v.Equal(c.con) + if eq { + return true, nil + } + + return false, fmt.Errorf("%s is not equal to %s", v, c.orig) +} + +// ^* --> (any) +// ^1.2.3 --> >=1.2.3 <2.0.0 +// ^1.2 --> >=1.2.0 <2.0.0 +// ^1 --> >=1.0.0 <2.0.0 +// ^0.2.3 --> >=0.2.3 <0.3.0 +// ^0.2 --> >=0.2.0 <0.3.0 +// ^0.0.3 --> >=0.0.3 <0.0.4 +// ^0.0 --> >=0.0.0 <0.1.0 +// ^0 --> >=0.0.0 <1.0.0 +func constraintCaret(v *Version, c *constraint) (bool, error) { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + // This less than handles prereleases + if v.LessThan(c.con) { + return false, fmt.Errorf("%s is less than %s", v, c.orig) + } + + var eq bool + + // ^ when the major > 0 is >=x.y.z < x+1 + if c.con.Major() > 0 || c.minorDirty { + + // ^ has to be within a major range for > 0. Everything less than was + // filtered out with the LessThan call above. This filters out those + // that greater but not within the same major range. 
+ eq = v.Major() == c.con.Major() + if eq { + return true, nil + } + return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig) + } + + // ^ when the major is 0 and minor > 0 is >=0.y.z < 0.y+1 + if c.con.Major() == 0 && v.Major() > 0 { + return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig) + } + // If the con Minor is > 0 it is not dirty + if c.con.Minor() > 0 || c.patchDirty { + eq = v.Minor() == c.con.Minor() + if eq { + return true, nil + } + return false, fmt.Errorf("%s does not have same minor version as %s. Expected minor versions to match when constraint major version is 0", v, c.orig) + } + + // At this point the major is 0 and the minor is 0 and not dirty. The patch + // is not dirty so we need to check if they are equal. If they are not equal + eq = c.con.Patch() == v.Patch() + if eq { + return true, nil + } + return false, fmt.Errorf("%s does not equal %s. Expect version and constraint to equal when major and minor versions are 0", v, c.orig) +} + +func isX(x string) bool { + switch x { + case "x", "*", "X": + return true + default: + return false + } +} + +func rewriteRange(i string) string { + m := constraintRangeRegex.FindAllStringSubmatch(i, -1) + if m == nil { + return i + } + o := i + for _, v := range m { + t := fmt.Sprintf(">= %s, <= %s", v[1], v[11]) + o = strings.Replace(o, v[0], t, 1) + } + + return o +} diff --git a/vendor/github.com/Masterminds/semver/v3/doc.go b/vendor/github.com/Masterminds/semver/v3/doc.go new file mode 100644 index 000000000..391aa46b7 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/doc.go @@ -0,0 +1,184 @@ +/* +Package semver provides the ability to work with Semantic Versions (http://semver.org) in Go. 
+ +Specifically it provides the ability to: + + * Parse semantic versions + * Sort semantic versions + * Check if a semantic version fits within a set of constraints + * Optionally work with a `v` prefix + +Parsing Semantic Versions + +There are two functions that can parse semantic versions. The `StrictNewVersion` +function only parses valid version 2 semantic versions as outlined in the +specification. The `NewVersion` function attempts to coerce a version into a +semantic version and parse it. For example, if there is a leading v or a version +listed without all 3 parts (e.g. 1.2) it will attempt to coerce it into a valid +semantic version (e.g., 1.2.0). In both cases a `Version` object is returned +that can be sorted, compared, and used in constraints. + +When parsing a version an optional error can be returned if there is an issue +parsing the version. For example, + + v, err := semver.NewVersion("1.2.3-beta.1+b345") + +The version object has methods to get the parts of the version, compare it to +other versions, convert the version back into a string, and get the original +string. For more details please see the documentation +at https://godoc.org/github.com/Masterminds/semver. + +Sorting Semantic Versions + +A set of versions can be sorted using the `sort` package from the standard library. +For example, + + raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",} + vs := make([]*semver.Version, len(raw)) + for i, r := range raw { + v, err := semver.NewVersion(r) + if err != nil { + t.Errorf("Error parsing version: %s", err) + } + + vs[i] = v + } + + sort.Sort(semver.Collection(vs)) + +Checking Version Constraints and Comparing Versions + +There are two methods for comparing versions. One uses comparison methods on +`Version` instances and the other is using Constraints. There are some important +differences to notes between these two methods of comparison. + +1. 
When two versions are compared using functions such as `Compare`, `LessThan`, + and others it will follow the specification and always include prereleases + within the comparison. It will provide an answer valid with the comparison + spec section at https://semver.org/#spec-item-11 +2. When constraint checking is used for checks or validation it will follow a + different set of rules that are common for ranges with tools like npm/js + and Rust/Cargo. This includes considering prereleases to be invalid if the + ranges does not include on. If you want to have it include pre-releases a + simple solution is to include `-0` in your range. +3. Constraint ranges can have some complex rules including the shorthard use of + ~ and ^. For more details on those see the options below. + +There are differences between the two methods or checking versions because the +comparison methods on `Version` follow the specification while comparison ranges +are not part of the specification. Different packages and tools have taken it +upon themselves to come up with range rules. This has resulted in differences. +For example, npm/js and Cargo/Rust follow similar patterns which PHP has a +different pattern for ^. The comparison features in this package follow the +npm/js and Cargo/Rust lead because applications using it have followed similar +patters with their versions. + +Checking a version against version constraints is one of the most featureful +parts of the package. + + c, err := semver.NewConstraint(">= 1.2.3") + if err != nil { + // Handle constraint not being parsable. + } + + v, err := semver.NewVersion("1.3") + if err != nil { + // Handle version not being parsable. + } + // Check if the version meets the constraints. The a variable will be true. + a := c.Check(v) + +Basic Comparisons + +There are two elements to the comparisons. First, a comparison string is a list +of comma or space separated AND comparisons. These are then separated by || (OR) +comparisons. 
For example, `">= 1.2 < 3.0.0 || >= 4.2.3"` is looking for a +comparison that's greater than or equal to 1.2 and less than 3.0.0 or is +greater than or equal to 4.2.3. This can also be written as +`">= 1.2, < 3.0.0 || >= 4.2.3"` + +The basic comparisons are: + + * `=`: equal (aliased to no operator) + * `!=`: not equal + * `>`: greater than + * `<`: less than + * `>=`: greater than or equal to + * `<=`: less than or equal to + +Hyphen Range Comparisons + +There are multiple methods to handle ranges and the first is hyphens ranges. +These look like: + + * `1.2 - 1.4.5` which is equivalent to `>= 1.2, <= 1.4.5` + * `2.3.4 - 4.5` which is equivalent to `>= 2.3.4 <= 4.5` + +Wildcards In Comparisons + +The `x`, `X`, and `*` characters can be used as a wildcard character. This works +for all comparison operators. When used on the `=` operator it falls +back to the tilde operation. For example, + + * `1.2.x` is equivalent to `>= 1.2.0 < 1.3.0` + * `>= 1.2.x` is equivalent to `>= 1.2.0` + * `<= 2.x` is equivalent to `<= 3` + * `*` is equivalent to `>= 0.0.0` + +Tilde Range Comparisons (Patch) + +The tilde (`~`) comparison operator is for patch level ranges when a minor +version is specified and major level changes when the minor number is missing. +For example, + + * `~1.2.3` is equivalent to `>= 1.2.3 < 1.3.0` + * `~1` is equivalent to `>= 1, < 2` + * `~2.3` is equivalent to `>= 2.3 < 2.4` + * `~1.2.x` is equivalent to `>= 1.2.0 < 1.3.0` + * `~1.x` is equivalent to `>= 1 < 2` + +Caret Range Comparisons (Major) + +The caret (`^`) comparison operator is for major level changes once a stable +(1.0.0) release has occurred. Prior to a 1.0.0 release the minor versions acts +as the API stability level. This is useful when comparisons of API versions as a +major change is API breaking. 
For example, + + * `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0` + * `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0` + * `^2.3` is equivalent to `>= 2.3, < 3` + * `^2.x` is equivalent to `>= 2.0.0, < 3` + * `^0.2.3` is equivalent to `>=0.2.3 <0.3.0` + * `^0.2` is equivalent to `>=0.2.0 <0.3.0` + * `^0.0.3` is equivalent to `>=0.0.3 <0.0.4` + * `^0.0` is equivalent to `>=0.0.0 <0.1.0` + * `^0` is equivalent to `>=0.0.0 <1.0.0` + +Validation + +In addition to testing a version against a constraint, a version can be validated +against a constraint. When validation fails a slice of errors containing why a +version didn't meet the constraint is returned. For example, + + c, err := semver.NewConstraint("<= 1.2.3, >= 1.4") + if err != nil { + // Handle constraint not being parseable. + } + + v, _ := semver.NewVersion("1.3") + if err != nil { + // Handle version not being parseable. + } + + // Validate a version against a constraint. + a, msgs := c.Validate(v) + // a is false + for _, m := range msgs { + fmt.Println(m) + + // Loops over the errors which would read + // "1.3 is greater than 1.2.3" + // "1.3 is less than 1.4" + } +*/ +package semver diff --git a/vendor/github.com/Masterminds/semver/v3/fuzz.go b/vendor/github.com/Masterminds/semver/v3/fuzz.go new file mode 100644 index 000000000..a242ad705 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/fuzz.go @@ -0,0 +1,22 @@ +// +build gofuzz + +package semver + +func Fuzz(data []byte) int { + d := string(data) + + // Test NewVersion + _, _ = NewVersion(d) + + // Test StrictNewVersion + _, _ = StrictNewVersion(d) + + // Test NewConstraint + _, _ = NewConstraint(d) + + // The return value should be 0 normally, 1 if the priority in future tests + // should be increased, and -1 if future tests should skip passing in that + // data. We do not have a reason to change priority so 0 is always returned. + // There are example tests that do this. 
+ return 0 +} diff --git a/vendor/github.com/Masterminds/semver/v3/version.go b/vendor/github.com/Masterminds/semver/v3/version.go new file mode 100644 index 000000000..d6b9cda3e --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/version.go @@ -0,0 +1,606 @@ +package semver + +import ( + "bytes" + "database/sql/driver" + "encoding/json" + "errors" + "fmt" + "regexp" + "strconv" + "strings" +) + +// The compiled version of the regex created at init() is cached here so it +// only needs to be created once. +var versionRegex *regexp.Regexp + +var ( + // ErrInvalidSemVer is returned a version is found to be invalid when + // being parsed. + ErrInvalidSemVer = errors.New("Invalid Semantic Version") + + // ErrEmptyString is returned when an empty string is passed in for parsing. + ErrEmptyString = errors.New("Version string empty") + + // ErrInvalidCharacters is returned when invalid characters are found as + // part of a version + ErrInvalidCharacters = errors.New("Invalid characters in version") + + // ErrSegmentStartsZero is returned when a version segment starts with 0. + // This is invalid in SemVer. + ErrSegmentStartsZero = errors.New("Version segment starts with 0") + + // ErrInvalidMetadata is returned when the metadata is an invalid format + ErrInvalidMetadata = errors.New("Invalid Metadata string") + + // ErrInvalidPrerelease is returned when the pre-release is an invalid format + ErrInvalidPrerelease = errors.New("Invalid Prerelease string") +) + +// semVerRegex is the regular expression used to parse a semantic version. +const semVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` + + `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + + `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + +// Version represents a single semantic version. 
+type Version struct { + major, minor, patch uint64 + pre string + metadata string + original string +} + +func init() { + versionRegex = regexp.MustCompile("^" + semVerRegex + "$") +} + +const num string = "0123456789" +const allowed string = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-" + num + +// StrictNewVersion parses a given version and returns an instance of Version or +// an error if unable to parse the version. Only parses valid semantic versions. +// Performs checking that can find errors within the version. +// If you want to coerce a version, such as 1 or 1.2, and perse that as the 1.x +// releases of semver provided use the NewSemver() function. +func StrictNewVersion(v string) (*Version, error) { + // Parsing here does not use RegEx in order to increase performance and reduce + // allocations. + + if len(v) == 0 { + return nil, ErrEmptyString + } + + // Split the parts into [0]major, [1]minor, and [2]patch,prerelease,build + parts := strings.SplitN(v, ".", 3) + if len(parts) != 3 { + return nil, ErrInvalidSemVer + } + + sv := &Version{ + original: v, + } + + // check for prerelease or build metadata + var extra []string + if strings.ContainsAny(parts[2], "-+") { + // Start with the build metadata first as it needs to be on the right + extra = strings.SplitN(parts[2], "+", 2) + if len(extra) > 1 { + // build metadata found + sv.metadata = extra[1] + parts[2] = extra[0] + } + + extra = strings.SplitN(parts[2], "-", 2) + if len(extra) > 1 { + // prerelease found + sv.pre = extra[1] + parts[2] = extra[0] + } + } + + // Validate the number segments are valid. This includes only having positive + // numbers and no leading 0's. 
+ for _, p := range parts { + if !containsOnly(p, num) { + return nil, ErrInvalidCharacters + } + + if len(p) > 1 && p[0] == '0' { + return nil, ErrSegmentStartsZero + } + } + + // Extract the major, minor, and patch elements onto the returned Version + var err error + sv.major, err = strconv.ParseUint(parts[0], 10, 64) + if err != nil { + return nil, err + } + + sv.minor, err = strconv.ParseUint(parts[1], 10, 64) + if err != nil { + return nil, err + } + + sv.patch, err = strconv.ParseUint(parts[2], 10, 64) + if err != nil { + return nil, err + } + + // No prerelease or build metadata found so returning now as a fastpath. + if sv.pre == "" && sv.metadata == "" { + return sv, nil + } + + if sv.pre != "" { + if err = validatePrerelease(sv.pre); err != nil { + return nil, err + } + } + + if sv.metadata != "" { + if err = validateMetadata(sv.metadata); err != nil { + return nil, err + } + } + + return sv, nil +} + +// NewVersion parses a given version and returns an instance of Version or +// an error if unable to parse the version. If the version is SemVer-ish it +// attempts to convert it to SemVer. If you want to validate it was a strict +// semantic version at parse time see StrictNewVersion(). 
+func NewVersion(v string) (*Version, error) { + m := versionRegex.FindStringSubmatch(v) + if m == nil { + return nil, ErrInvalidSemVer + } + + sv := &Version{ + metadata: m[8], + pre: m[5], + original: v, + } + + var err error + sv.major, err = strconv.ParseUint(m[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("Error parsing version segment: %s", err) + } + + if m[2] != "" { + sv.minor, err = strconv.ParseUint(strings.TrimPrefix(m[2], "."), 10, 64) + if err != nil { + return nil, fmt.Errorf("Error parsing version segment: %s", err) + } + } else { + sv.minor = 0 + } + + if m[3] != "" { + sv.patch, err = strconv.ParseUint(strings.TrimPrefix(m[3], "."), 10, 64) + if err != nil { + return nil, fmt.Errorf("Error parsing version segment: %s", err) + } + } else { + sv.patch = 0 + } + + // Perform some basic due diligence on the extra parts to ensure they are + // valid. + + if sv.pre != "" { + if err = validatePrerelease(sv.pre); err != nil { + return nil, err + } + } + + if sv.metadata != "" { + if err = validateMetadata(sv.metadata); err != nil { + return nil, err + } + } + + return sv, nil +} + +// MustParse parses a given version and panics on error. +func MustParse(v string) *Version { + sv, err := NewVersion(v) + if err != nil { + panic(err) + } + return sv +} + +// String converts a Version object to a string. +// Note, if the original version contained a leading v this version will not. +// See the Original() method to retrieve the original value. Semantic Versions +// don't contain a leading v per the spec. Instead it's optional on +// implementation. +func (v Version) String() string { + var buf bytes.Buffer + + fmt.Fprintf(&buf, "%d.%d.%d", v.major, v.minor, v.patch) + if v.pre != "" { + fmt.Fprintf(&buf, "-%s", v.pre) + } + if v.metadata != "" { + fmt.Fprintf(&buf, "+%s", v.metadata) + } + + return buf.String() +} + +// Original returns the original value passed in to be parsed. 
+func (v *Version) Original() string { + return v.original +} + +// Major returns the major version. +func (v Version) Major() uint64 { + return v.major +} + +// Minor returns the minor version. +func (v Version) Minor() uint64 { + return v.minor +} + +// Patch returns the patch version. +func (v Version) Patch() uint64 { + return v.patch +} + +// Prerelease returns the pre-release version. +func (v Version) Prerelease() string { + return v.pre +} + +// Metadata returns the metadata on the version. +func (v Version) Metadata() string { + return v.metadata +} + +// originalVPrefix returns the original 'v' prefix if any. +func (v Version) originalVPrefix() string { + + // Note, only lowercase v is supported as a prefix by the parser. + if v.original != "" && v.original[:1] == "v" { + return v.original[:1] + } + return "" +} + +// IncPatch produces the next patch version. +// If the current version does not have prerelease/metadata information, +// it unsets metadata and prerelease values, increments patch number. +// If the current version has any of prerelease or metadata information, +// it unsets both values and keeps current patch value +func (v Version) IncPatch() Version { + vNext := v + // according to http://semver.org/#spec-item-9 + // Pre-release versions have a lower precedence than the associated normal version. + // according to http://semver.org/#spec-item-10 + // Build metadata SHOULD be ignored when determining version precedence. + if v.pre != "" { + vNext.metadata = "" + vNext.pre = "" + } else { + vNext.metadata = "" + vNext.pre = "" + vNext.patch = v.patch + 1 + } + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext +} + +// IncMinor produces the next minor version. +// Sets patch to 0. +// Increments minor number. +// Unsets metadata. +// Unsets prerelease status. 
+func (v Version) IncMinor() Version { + vNext := v + vNext.metadata = "" + vNext.pre = "" + vNext.patch = 0 + vNext.minor = v.minor + 1 + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext +} + +// IncMajor produces the next major version. +// Sets patch to 0. +// Sets minor to 0. +// Increments major number. +// Unsets metadata. +// Unsets prerelease status. +func (v Version) IncMajor() Version { + vNext := v + vNext.metadata = "" + vNext.pre = "" + vNext.patch = 0 + vNext.minor = 0 + vNext.major = v.major + 1 + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext +} + +// SetPrerelease defines the prerelease value. +// Value must not include the required 'hyphen' prefix. +func (v Version) SetPrerelease(prerelease string) (Version, error) { + vNext := v + if len(prerelease) > 0 { + if err := validatePrerelease(prerelease); err != nil { + return vNext, err + } + } + vNext.pre = prerelease + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext, nil +} + +// SetMetadata defines metadata value. +// Value must not include the required 'plus' prefix. +func (v Version) SetMetadata(metadata string) (Version, error) { + vNext := v + if len(metadata) > 0 { + if err := validateMetadata(metadata); err != nil { + return vNext, err + } + } + vNext.metadata = metadata + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext, nil +} + +// LessThan tests if one version is less than another one. +func (v *Version) LessThan(o *Version) bool { + return v.Compare(o) < 0 +} + +// GreaterThan tests if one version is greater than another one. +func (v *Version) GreaterThan(o *Version) bool { + return v.Compare(o) > 0 +} + +// Equal tests if two versions are equal to each other. +// Note, versions can be equal with different metadata since metadata +// is not considered part of the comparable version. 
+func (v *Version) Equal(o *Version) bool { + return v.Compare(o) == 0 +} + +// Compare compares this version to another one. It returns -1, 0, or 1 if +// the version smaller, equal, or larger than the other version. +// +// Versions are compared by X.Y.Z. Build metadata is ignored. Prerelease is +// lower than the version without a prerelease. Compare always takes into account +// prereleases. If you want to work with ranges using typical range syntaxes that +// skip prereleases if the range is not looking for them use constraints. +func (v *Version) Compare(o *Version) int { + // Compare the major, minor, and patch version for differences. If a + // difference is found return the comparison. + if d := compareSegment(v.Major(), o.Major()); d != 0 { + return d + } + if d := compareSegment(v.Minor(), o.Minor()); d != 0 { + return d + } + if d := compareSegment(v.Patch(), o.Patch()); d != 0 { + return d + } + + // At this point the major, minor, and patch versions are the same. + ps := v.pre + po := o.Prerelease() + + if ps == "" && po == "" { + return 0 + } + if ps == "" { + return 1 + } + if po == "" { + return -1 + } + + return comparePrerelease(ps, po) +} + +// UnmarshalJSON implements JSON.Unmarshaler interface. +func (v *Version) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + temp, err := NewVersion(s) + if err != nil { + return err + } + v.major = temp.major + v.minor = temp.minor + v.patch = temp.patch + v.pre = temp.pre + v.metadata = temp.metadata + v.original = temp.original + return nil +} + +// MarshalJSON implements JSON.Marshaler interface. +func (v Version) MarshalJSON() ([]byte, error) { + return json.Marshal(v.String()) +} + +// Scan implements the SQL.Scanner interface. 
+func (v *Version) Scan(value interface{}) error { + var s string + s, _ = value.(string) + temp, err := NewVersion(s) + if err != nil { + return err + } + v.major = temp.major + v.minor = temp.minor + v.patch = temp.patch + v.pre = temp.pre + v.metadata = temp.metadata + v.original = temp.original + return nil +} + +// Value implements the Driver.Valuer interface. +func (v Version) Value() (driver.Value, error) { + return v.String(), nil +} + +func compareSegment(v, o uint64) int { + if v < o { + return -1 + } + if v > o { + return 1 + } + + return 0 +} + +func comparePrerelease(v, o string) int { + + // split the prelease versions by their part. The separator, per the spec, + // is a . + sparts := strings.Split(v, ".") + oparts := strings.Split(o, ".") + + // Find the longer length of the parts to know how many loop iterations to + // go through. + slen := len(sparts) + olen := len(oparts) + + l := slen + if olen > slen { + l = olen + } + + // Iterate over each part of the prereleases to compare the differences. + for i := 0; i < l; i++ { + // Since the lentgh of the parts can be different we need to create + // a placeholder. This is to avoid out of bounds issues. + stemp := "" + if i < slen { + stemp = sparts[i] + } + + otemp := "" + if i < olen { + otemp = oparts[i] + } + + d := comparePrePart(stemp, otemp) + if d != 0 { + return d + } + } + + // Reaching here means two versions are of equal value but have different + // metadata (the part following a +). They are not identical in string form + // but the version comparison finds them to be equal. + return 0 +} + +func comparePrePart(s, o string) int { + // Fastpath if they are equal + if s == o { + return 0 + } + + // When s or o are empty we can use the other in an attempt to determine + // the response. + if s == "" { + if o != "" { + return -1 + } + return 1 + } + + if o == "" { + if s != "" { + return 1 + } + return -1 + } + + // When comparing strings "99" is greater than "103". 
To handle + // cases like this we need to detect numbers and compare them. According + // to the semver spec, numbers are always positive. If there is a - at the + // start like -99 this is to be evaluated as an alphanum. numbers always + // have precedence over alphanum. Parsing as Uints because negative numbers + // are ignored. + + oi, n1 := strconv.ParseUint(o, 10, 64) + si, n2 := strconv.ParseUint(s, 10, 64) + + // The case where both are strings compare the strings + if n1 != nil && n2 != nil { + if s > o { + return 1 + } + return -1 + } else if n1 != nil { + // o is a string and s is a number + return -1 + } else if n2 != nil { + // s is a string and o is a number + return 1 + } + // Both are numbers + if si > oi { + return 1 + } + return -1 + +} + +// Like strings.ContainsAny but does an only instead of any. +func containsOnly(s string, comp string) bool { + return strings.IndexFunc(s, func(r rune) bool { + return !strings.ContainsRune(comp, r) + }) == -1 +} + +// From the spec, "Identifiers MUST comprise only +// ASCII alphanumerics and hyphen [0-9A-Za-z-]. Identifiers MUST NOT be empty. +// Numeric identifiers MUST NOT include leading zeroes.". These segments can +// be dot separated. +func validatePrerelease(p string) error { + eparts := strings.Split(p, ".") + for _, p := range eparts { + if containsOnly(p, num) { + if len(p) > 1 && p[0] == '0' { + return ErrSegmentStartsZero + } + } else if !containsOnly(p, allowed) { + return ErrInvalidPrerelease + } + } + + return nil +} + +// From the spec, "Build metadata MAY be denoted by +// appending a plus sign and a series of dot separated identifiers immediately +// following the patch or pre-release version. Identifiers MUST comprise only +// ASCII alphanumerics and hyphen [0-9A-Za-z-]. Identifiers MUST NOT be empty." 
+func validateMetadata(m string) error { + eparts := strings.Split(m, ".") + for _, p := range eparts { + if !containsOnly(p, allowed) { + return ErrInvalidMetadata + } + } + return nil +} diff --git a/vendor/github.com/Masterminds/sprig/v3/.gitignore b/vendor/github.com/Masterminds/sprig/v3/.gitignore new file mode 100644 index 000000000..5e3002f88 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/.gitignore @@ -0,0 +1,2 @@ +vendor/ +/.glide diff --git a/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md b/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md new file mode 100644 index 000000000..fcdd4e88a --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md @@ -0,0 +1,370 @@ +# Changelog + +## Release 3.2.1 (2021-02-04) + +### Changed + +- Upgraded `Masterminds/goutils` to `v1.1.1`. see the [Security Advisory](https://github.com/Masterminds/goutils/security/advisories/GHSA-xg2h-wx96-xgxr) + +## Release 3.2.0 (2020-12-14) + +### Added + +- #211: Added randInt function (thanks @kochurovro) +- #223: Added fromJson and mustFromJson functions (thanks @mholt) +- #242: Added a bcrypt function (thanks @robbiet480) +- #253: Added randBytes function (thanks @MikaelSmith) +- #254: Added dig function for dicts (thanks @nyarly) +- #257: Added regexQuoteMeta for quoting regex metadata (thanks @rheaton) +- #261: Added filepath functions osBase, osDir, osExt, osClean, osIsAbs (thanks @zugl) +- #268: Added and and all functions for testing conditions (thanks @phuslu) +- #181: Added float64 arithmetic addf, add1f, subf, divf, mulf, maxf, and minf + (thanks @andrewmostello) +- #265: Added chunk function to split array into smaller arrays (thanks @karelbilek) +- #270: Extend certificate functions to handle non-RSA keys + add support for + ed25519 keys (thanks @misberner) + +### Changed + +- Removed testing and support for Go 1.12. 
ed25519 support requires Go 1.13 or newer +- Using semver 3.1.1 and mergo 0.3.11 + +### Fixed + +- #249: Fix htmlDateInZone example (thanks @spawnia) + +NOTE: The dependency github.com/imdario/mergo reverted the breaking change in +0.3.9 via 0.3.10 release. + +## Release 3.1.0 (2020-04-16) + +NOTE: The dependency github.com/imdario/mergo made a behavior change in 0.3.9 +that impacts sprig functionality. Do not use sprig with a version newer than 0.3.8. + +### Added + +- #225: Added support for generating htpasswd hash (thanks @rustycl0ck) +- #224: Added duration filter (thanks @frebib) +- #205: Added `seq` function (thanks @thadc23) + +### Changed + +- #203: Unlambda functions with correct signature (thanks @muesli) +- #236: Updated the license formatting for GitHub display purposes +- #238: Updated package dependency versions. Note, mergo not updated to 0.3.9 + as it causes a breaking change for sprig. That issue is tracked at + https://github.com/imdario/mergo/issues/139 + +### Fixed + +- #229: Fix `seq` example in docs (thanks @kalmant) + +## Release 3.0.2 (2019-12-13) + +### Fixed + +- #220: Updating to semver v3.0.3 to fix issue with <= ranges +- #218: fix typo elyptical->elliptic in ecdsa key description (thanks @laverya) + +## Release 3.0.1 (2019-12-08) + +### Fixed + +- #212: Updated semver fixing broken constraint checking with ^0.0 + +## Release 3.0.0 (2019-10-02) + +### Added + +- #187: Added durationRound function (thanks @yjp20) +- #189: Added numerous template functions that return errors rather than panic (thanks @nrvnrvn) +- #193: Added toRawJson support (thanks @Dean-Coakley) +- #197: Added get support to dicts (thanks @Dean-Coakley) + +### Changed + +- #186: Moving dependency management to Go modules +- #186: Updated semver to v3. This has changes in the way ^ is handled +- #194: Updated documentation on merging and how it copies. 
Added example using deepCopy +- #196: trunc now supports negative values (thanks @Dean-Coakley) + +## Release 2.22.0 (2019-10-02) + +### Added + +- #173: Added getHostByName function to resolve dns names to ips (thanks @fcgravalos) +- #195: Added deepCopy function for use with dicts + +### Changed + +- Updated merge and mergeOverwrite documentation to explain copying and how to + use deepCopy with it + +## Release 2.21.0 (2019-09-18) + +### Added + +- #122: Added encryptAES/decryptAES functions (thanks @n0madic) +- #128: Added toDecimal support (thanks @Dean-Coakley) +- #169: Added list contcat (thanks @astorath) +- #174: Added deepEqual function (thanks @bonifaido) +- #170: Added url parse and join functions (thanks @astorath) + +### Changed + +- #171: Updated glide config for Google UUID to v1 and to add ranges to semver and testify + +### Fixed + +- #172: Fix semver wildcard example (thanks @piepmatz) +- #175: Fix dateInZone doc example (thanks @s3than) + +## Release 2.20.0 (2019-06-18) + +### Added + +- #164: Adding function to get unix epoch for a time (@mattfarina) +- #166: Adding tests for date_in_zone (@mattfarina) + +### Changed + +- #144: Fix function comments based on best practices from Effective Go (@CodeLingoTeam) +- #150: Handles pointer type for time.Time in "htmlDate" (@mapreal19) +- #161, #157, #160, #153, #158, #156, #155, #159, #152 documentation updates (@badeadan) + +### Fixed + +## Release 2.19.0 (2019-03-02) + +IMPORTANT: This release reverts a change from 2.18.0 + +In the previous release (2.18), we prematurely merged a partial change to the crypto functions that led to creating two sets of crypto functions (I blame @technosophos -- since that's me). This release rolls back that change, and does what was originally intended: It alters the existing crypto functions to use secure random. 
+ +We debated whether this classifies as a change worthy of major revision, but given the proximity to the last release, we have decided that treating 2.18 as a faulty release is the correct course of action. We apologize for any inconvenience. + +### Changed + +- Fix substr panic 35fb796 (Alexey igrychev) +- Remove extra period 1eb7729 (Matthew Lorimor) +- Make random string functions use crypto by default 6ceff26 (Matthew Lorimor) +- README edits/fixes/suggestions 08fe136 (Lauri Apple) + + +## Release 2.18.0 (2019-02-12) + +### Added + +- Added mergeOverwrite function +- cryptographic functions that use secure random (see fe1de12) + +### Changed + +- Improve documentation of regexMatch function, resolves #139 90b89ce (Jan Tagscherer) +- Handle has for nil list 9c10885 (Daniel Cohen) +- Document behaviour of mergeOverwrite fe0dbe9 (Lukas Rieder) +- doc: adds missing documentation. 4b871e6 (Fernandez Ludovic) +- Replace outdated goutils imports 01893d2 (Matthew Lorimor) +- Surface crypto secure random strings from goutils fe1de12 (Matthew Lorimor) +- Handle untyped nil values as paramters to string functions 2b2ec8f (Morten Torkildsen) + +### Fixed + +- Fix dict merge issue and provide mergeOverwrite .dst .src1 to overwrite from src -> dst 4c59c12 (Lukas Rieder) +- Fix substr var names and comments d581f80 (Dean Coakley) +- Fix substr documentation 2737203 (Dean Coakley) + +## Release 2.17.1 (2019-01-03) + +### Fixed + +The 2.17.0 release did not have a version pinned for xstrings, which caused compilation failures when xstrings < 1.2 was used. This adds the correct version string to glide.yaml. 
+ +## Release 2.17.0 (2019-01-03) + +### Added + +- adds alder32sum function and test 6908fc2 (marshallford) +- Added kebabcase function ca331a1 (Ilyes512) + +### Changed + +- Update goutils to 1.1.0 4e1125d (Matt Butcher) + +### Fixed + +- Fix 'has' documentation e3f2a85 (dean-coakley) +- docs(dict): fix typo in pick example dc424f9 (Dustin Specker) +- fixes spelling errors... not sure how that happened 4cf188a (marshallford) + +## Release 2.16.0 (2018-08-13) + +### Added + +- add splitn function fccb0b0 (Helgi Þorbjörnsson) +- Add slice func df28ca7 (gongdo) +- Generate serial number a3bdffd (Cody Coons) +- Extract values of dict with values function df39312 (Lawrence Jones) + +### Changed + +- Modify panic message for list.slice ae38335 (gongdo) +- Minor improvement in code quality - Removed an unreachable piece of code at defaults.go#L26:6 - Resolve formatting issues. 5834241 (Abhishek Kashyap) +- Remove duplicated documentation 1d97af1 (Matthew Fisher) +- Test on go 1.11 49df809 (Helgi Þormar Þorbjörnsson) + +### Fixed + +- Fix file permissions c5f40b5 (gongdo) +- Fix example for buildCustomCert 7779e0d (Tin Lam) + +## Release 2.15.0 (2018-04-02) + +### Added + +- #68 and #69: Add json helpers to docs (thanks @arunvelsriram) +- #66: Add ternary function (thanks @binoculars) +- #67: Allow keys function to take multiple dicts (thanks @binoculars) +- #89: Added sha1sum to crypto function (thanks @benkeil) +- #81: Allow customizing Root CA that used by genSignedCert (thanks @chenzhiwei) +- #92: Add travis testing for go 1.10 +- #93: Adding appveyor config for windows testing + +### Changed + +- #90: Updating to more recent dependencies +- #73: replace satori/go.uuid with google/uuid (thanks @petterw) + +### Fixed + +- #76: Fixed documentation typos (thanks @Thiht) +- Fixed rounding issue on the `ago` function. 
Note, the removes support for Go 1.8 and older + +## Release 2.14.1 (2017-12-01) + +### Fixed + +- #60: Fix typo in function name documentation (thanks @neil-ca-moore) +- #61: Removing line with {{ due to blocking github pages genertion +- #64: Update the list functions to handle int, string, and other slices for compatibility + +## Release 2.14.0 (2017-10-06) + +This new version of Sprig adds a set of functions for generating and working with SSL certificates. + +- `genCA` generates an SSL Certificate Authority +- `genSelfSignedCert` generates an SSL self-signed certificate +- `genSignedCert` generates an SSL certificate and key based on a given CA + +## Release 2.13.0 (2017-09-18) + +This release adds new functions, including: + +- `regexMatch`, `regexFindAll`, `regexFind`, `regexReplaceAll`, `regexReplaceAllLiteral`, and `regexSplit` to work with regular expressions +- `floor`, `ceil`, and `round` math functions +- `toDate` converts a string to a date +- `nindent` is just like `indent` but also prepends a new line +- `ago` returns the time from `time.Now` + +### Added + +- #40: Added basic regex functionality (thanks @alanquillin) +- #41: Added ceil floor and round functions (thanks @alanquillin) +- #48: Added toDate function (thanks @andreynering) +- #50: Added nindent function (thanks @binoculars) +- #46: Added ago function (thanks @slayer) + +### Changed + +- #51: Updated godocs to include new string functions (thanks @curtisallen) +- #49: Added ability to merge multiple dicts (thanks @binoculars) + +## Release 2.12.0 (2017-05-17) + +- `snakecase`, `camelcase`, and `shuffle` are three new string functions +- `fail` allows you to bail out of a template render when conditions are not met + +## Release 2.11.0 (2017-05-02) + +- Added `toJson` and `toPrettyJson` +- Added `merge` +- Refactored documentation + +## Release 2.10.0 (2017-03-15) + +- Added `semver` and `semverCompare` for Semantic Versions +- `list` replaces `tuple` +- Fixed issue with `join` +- Added 
`first`, `last`, `intial`, `rest`, `prepend`, `append`, `toString`, `toStrings`, `sortAlpha`, `reverse`, `coalesce`, `pluck`, `pick`, `compact`, `keys`, `omit`, `uniq`, `has`, `without` + +## Release 2.9.0 (2017-02-23) + +- Added `splitList` to split a list +- Added crypto functions of `genPrivateKey` and `derivePassword` + +## Release 2.8.0 (2016-12-21) + +- Added access to several path functions (`base`, `dir`, `clean`, `ext`, and `abs`) +- Added functions for _mutating_ dictionaries (`set`, `unset`, `hasKey`) + +## Release 2.7.0 (2016-12-01) + +- Added `sha256sum` to generate a hash of an input +- Added functions to convert a numeric or string to `int`, `int64`, `float64` + +## Release 2.6.0 (2016-10-03) + +- Added a `uuidv4` template function for generating UUIDs inside of a template. + +## Release 2.5.0 (2016-08-19) + +- New `trimSuffix`, `trimPrefix`, `hasSuffix`, and `hasPrefix` functions +- New aliases have been added for a few functions that didn't follow the naming conventions (`trimAll` and `abbrevBoth`) +- `trimall` and `abbrevboth` (notice the case) are deprecated and will be removed in 3.0.0 + +## Release 2.4.0 (2016-08-16) + +- Adds two functions: `until` and `untilStep` + +## Release 2.3.0 (2016-06-21) + +- cat: Concatenate strings with whitespace separators. +- replace: Replace parts of a string: `replace " " "-" "Me First"` renders "Me-First" +- plural: Format plurals: `len "foo" | plural "one foo" "many foos"` renders "many foos" +- indent: Indent blocks of text in a way that is sensitive to "\n" characters. + +## Release 2.2.0 (2016-04-21) + +- Added a `genPrivateKey` function (Thanks @bacongobbler) + +## Release 2.1.0 (2016-03-30) + +- `default` now prints the default value when it does not receive a value down the pipeline. It is much safer now to do `{{.Foo | default "bar"}}`. +- Added accessors for "hermetic" functions. These return only functions that, when given the same input, produce the same output. 
+ +## Release 2.0.0 (2016-03-29) + +Because we switched from `int` to `int64` as the return value for all integer math functions, the library's major version number has been incremented. + +- `min` complements `max` (formerly `biggest`) +- `empty` indicates that a value is the empty value for its type +- `tuple` creates a tuple inside of a template: `{{$t := tuple "a", "b" "c"}}` +- `dict` creates a dictionary inside of a template `{{$d := dict "key1" "val1" "key2" "val2"}}` +- Date formatters have been added for HTML dates (as used in `date` input fields) +- Integer math functions can convert from a number of types, including `string` (via `strconv.ParseInt`). + +## Release 1.2.0 (2016-02-01) + +- Added quote and squote +- Added b32enc and b32dec +- add now takes varargs +- biggest now takes varargs + +## Release 1.1.0 (2015-12-29) + +- Added #4: Added contains function. strings.Contains, but with the arguments + switched to simplify common pipelines. (thanks krancour) +- Added Travis-CI testing support + +## Release 1.0.0 (2015-12-23) + +- Initial release diff --git a/vendor/github.com/Masterminds/sprig/v3/LICENSE.txt b/vendor/github.com/Masterminds/sprig/v3/LICENSE.txt new file mode 100644 index 000000000..f311b1eaa --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (C) 2013-2020 Masterminds + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/Masterminds/sprig/v3/Makefile b/vendor/github.com/Masterminds/sprig/v3/Makefile new file mode 100644 index 000000000..78d409cde --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/Makefile @@ -0,0 +1,9 @@ +.PHONY: test +test: + @echo "==> Running tests" + GO111MODULE=on go test -v + +.PHONY: test-cover +test-cover: + @echo "==> Running Tests with coverage" + GO111MODULE=on go test -cover . diff --git a/vendor/github.com/Masterminds/sprig/v3/README.md b/vendor/github.com/Masterminds/sprig/v3/README.md new file mode 100644 index 000000000..c37ba01c2 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/README.md @@ -0,0 +1,101 @@ +# Sprig: Template functions for Go templates + +[![GoDoc](https://img.shields.io/static/v1?label=godoc&message=reference&color=blue)](https://pkg.go.dev/github.com/Masterminds/sprig/v3) +[![Go Report Card](https://goreportcard.com/badge/github.com/Masterminds/sprig)](https://goreportcard.com/report/github.com/Masterminds/sprig) +[![Stability: Sustained](https://masterminds.github.io/stability/sustained.svg)](https://masterminds.github.io/stability/sustained.html) +[![](https://github.com/Masterminds/sprig/workflows/Tests/badge.svg)](https://github.com/Masterminds/sprig/actions) + +The Go language comes with a [built-in template +language](http://golang.org/pkg/text/template/), but not +very many template functions. Sprig is a library that provides more than 100 commonly +used template functions. 
+ +It is inspired by the template functions found in +[Twig](http://twig.sensiolabs.org/documentation) and in various +JavaScript libraries, such as [underscore.js](http://underscorejs.org/). + +## IMPORTANT NOTES + +Sprig leverages [mergo](https://github.com/imdario/mergo) to handle merges. In +its v0.3.9 release there was a behavior change that impacts merging template +functions in sprig. It is currently recommended to use v0.3.8 of that package. +Using v0.3.9 will cause sprig tests to fail. The issue in mergo is tracked at +https://github.com/imdario/mergo/issues/139. + +## Package Versions + +There are two active major versions of the `sprig` package. + +* v3 is currently stable release series on the `master` branch. The Go API should + remain compatible with v2, the current stable version. Behavior change behind + some functions is the reason for the new major version. +* v2 is the previous stable release series. It has been more than three years since + the initial release of v2. You can read the documentation and see the code + on the [release-2](https://github.com/Masterminds/sprig/tree/release-2) branch. + Bug fixes to this major version will continue for some time. + +## Usage + +**Template developers**: Please use Sprig's [function documentation](http://masterminds.github.io/sprig/) for +detailed instructions and code snippets for the >100 template functions available. + +**Go developers**: If you'd like to include Sprig as a library in your program, +our API documentation is available [at GoDoc.org](http://godoc.org/github.com/Masterminds/sprig). + +For standard usage, read on. + +### Load the Sprig library + +To load the Sprig `FuncMap`: + +```go + +import ( + "github.com/Masterminds/sprig" + "html/template" +) + +// This example illustrates that the FuncMap *must* be set before the +// templates themselves are loaded. 
+tpl := template.Must( + template.New("base").Funcs(sprig.FuncMap()).ParseGlob("*.html") +) + + +``` + +### Calling the functions inside of templates + +By convention, all functions are lowercase. This seems to follow the Go +idiom for template functions (as opposed to template methods, which are +TitleCase). For example, this: + +``` +{{ "hello!" | upper | repeat 5 }} +``` + +produces this: + +``` +HELLO!HELLO!HELLO!HELLO!HELLO! +``` + +## Principles Driving Our Function Selection + +We followed these principles to decide which functions to add and how to implement them: + +- Use template functions to build layout. The following + types of operations are within the domain of template functions: + - Formatting + - Layout + - Simple type conversions + - Utilities that assist in handling common formatting and layout needs (e.g. arithmetic) +- Template functions should not return errors unless there is no way to print + a sensible value. For example, converting a string to an integer should not + produce an error if conversion fails. Instead, it should display a default + value. +- Simple math is necessary for grid layouts, pagers, and so on. Complex math + (anything other than arithmetic) should be done outside of templates. +- Template functions only deal with the data passed into them. They never retrieve + data from a source. +- Finally, do not override core Go template functions. 
diff --git a/vendor/github.com/Masterminds/sprig/v3/crypto.go b/vendor/github.com/Masterminds/sprig/v3/crypto.go new file mode 100644 index 000000000..13a5cd559 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/crypto.go @@ -0,0 +1,653 @@ +package sprig + +import ( + "bytes" + "crypto" + "crypto/aes" + "crypto/cipher" + "crypto/dsa" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/elliptic" + "crypto/hmac" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/sha256" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "encoding/base64" + "encoding/binary" + "encoding/hex" + "encoding/pem" + "errors" + "fmt" + "hash/adler32" + "io" + "math/big" + "net" + "time" + + "strings" + + "github.com/google/uuid" + bcrypt_lib "golang.org/x/crypto/bcrypt" + "golang.org/x/crypto/scrypt" +) + +func sha256sum(input string) string { + hash := sha256.Sum256([]byte(input)) + return hex.EncodeToString(hash[:]) +} + +func sha1sum(input string) string { + hash := sha1.Sum([]byte(input)) + return hex.EncodeToString(hash[:]) +} + +func adler32sum(input string) string { + hash := adler32.Checksum([]byte(input)) + return fmt.Sprintf("%d", hash) +} + +func bcrypt(input string) string { + hash, err := bcrypt_lib.GenerateFromPassword([]byte(input), bcrypt_lib.DefaultCost) + if err != nil { + return fmt.Sprintf("failed to encrypt string with bcrypt: %s", err) + } + + return string(hash) +} + +func htpasswd(username string, password string) string { + if strings.Contains(username, ":") { + return fmt.Sprintf("invalid username: %s", username) + } + return fmt.Sprintf("%s:%s", username, bcrypt(password)) +} + +func randBytes(count int) (string, error) { + buf := make([]byte, count) + if _, err := rand.Read(buf); err != nil { + return "", err + } + return base64.StdEncoding.EncodeToString(buf), nil +} + +// uuidv4 provides a safe and secure UUID v4 implementation +func uuidv4() string { + return uuid.New().String() +} + +var masterPasswordSeed = "com.lyndir.masterpassword" + +var 
passwordTypeTemplates = map[string][][]byte{ + "maximum": {[]byte("anoxxxxxxxxxxxxxxxxx"), []byte("axxxxxxxxxxxxxxxxxno")}, + "long": {[]byte("CvcvnoCvcvCvcv"), []byte("CvcvCvcvnoCvcv"), []byte("CvcvCvcvCvcvno"), []byte("CvccnoCvcvCvcv"), []byte("CvccCvcvnoCvcv"), + []byte("CvccCvcvCvcvno"), []byte("CvcvnoCvccCvcv"), []byte("CvcvCvccnoCvcv"), []byte("CvcvCvccCvcvno"), []byte("CvcvnoCvcvCvcc"), + []byte("CvcvCvcvnoCvcc"), []byte("CvcvCvcvCvccno"), []byte("CvccnoCvccCvcv"), []byte("CvccCvccnoCvcv"), []byte("CvccCvccCvcvno"), + []byte("CvcvnoCvccCvcc"), []byte("CvcvCvccnoCvcc"), []byte("CvcvCvccCvccno"), []byte("CvccnoCvcvCvcc"), []byte("CvccCvcvnoCvcc"), + []byte("CvccCvcvCvccno")}, + "medium": {[]byte("CvcnoCvc"), []byte("CvcCvcno")}, + "short": {[]byte("Cvcn")}, + "basic": {[]byte("aaanaaan"), []byte("aannaaan"), []byte("aaannaaa")}, + "pin": {[]byte("nnnn")}, +} + +var templateCharacters = map[byte]string{ + 'V': "AEIOU", + 'C': "BCDFGHJKLMNPQRSTVWXYZ", + 'v': "aeiou", + 'c': "bcdfghjklmnpqrstvwxyz", + 'A': "AEIOUBCDFGHJKLMNPQRSTVWXYZ", + 'a': "AEIOUaeiouBCDFGHJKLMNPQRSTVWXYZbcdfghjklmnpqrstvwxyz", + 'n': "0123456789", + 'o': "@&%?,=[]_:-+*$#!'^~;()/.", + 'x': "AEIOUaeiouBCDFGHJKLMNPQRSTVWXYZbcdfghjklmnpqrstvwxyz0123456789!@#$%^&*()", +} + +func derivePassword(counter uint32, passwordType, password, user, site string) string { + var templates = passwordTypeTemplates[passwordType] + if templates == nil { + return fmt.Sprintf("cannot find password template %s", passwordType) + } + + var buffer bytes.Buffer + buffer.WriteString(masterPasswordSeed) + binary.Write(&buffer, binary.BigEndian, uint32(len(user))) + buffer.WriteString(user) + + salt := buffer.Bytes() + key, err := scrypt.Key([]byte(password), salt, 32768, 8, 2, 64) + if err != nil { + return fmt.Sprintf("failed to derive password: %s", err) + } + + buffer.Truncate(len(masterPasswordSeed)) + binary.Write(&buffer, binary.BigEndian, uint32(len(site))) + buffer.WriteString(site) + binary.Write(&buffer, 
binary.BigEndian, counter) + + var hmacv = hmac.New(sha256.New, key) + hmacv.Write(buffer.Bytes()) + var seed = hmacv.Sum(nil) + var temp = templates[int(seed[0])%len(templates)] + + buffer.Truncate(0) + for i, element := range temp { + passChars := templateCharacters[element] + passChar := passChars[int(seed[i+1])%len(passChars)] + buffer.WriteByte(passChar) + } + + return buffer.String() +} + +func generatePrivateKey(typ string) string { + var priv interface{} + var err error + switch typ { + case "", "rsa": + // good enough for government work + priv, err = rsa.GenerateKey(rand.Reader, 4096) + case "dsa": + key := new(dsa.PrivateKey) + // again, good enough for government work + if err = dsa.GenerateParameters(&key.Parameters, rand.Reader, dsa.L2048N256); err != nil { + return fmt.Sprintf("failed to generate dsa params: %s", err) + } + err = dsa.GenerateKey(key, rand.Reader) + priv = key + case "ecdsa": + // again, good enough for government work + priv, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + case "ed25519": + _, priv, err = ed25519.GenerateKey(rand.Reader) + default: + return "Unknown type " + typ + } + if err != nil { + return fmt.Sprintf("failed to generate private key: %s", err) + } + + return string(pem.EncodeToMemory(pemBlockForKey(priv))) +} + +// DSAKeyFormat stores the format for DSA keys. 
+// Used by pemBlockForKey +type DSAKeyFormat struct { + Version int + P, Q, G, Y, X *big.Int +} + +func pemBlockForKey(priv interface{}) *pem.Block { + switch k := priv.(type) { + case *rsa.PrivateKey: + return &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(k)} + case *dsa.PrivateKey: + val := DSAKeyFormat{ + P: k.P, Q: k.Q, G: k.G, + Y: k.Y, X: k.X, + } + bytes, _ := asn1.Marshal(val) + return &pem.Block{Type: "DSA PRIVATE KEY", Bytes: bytes} + case *ecdsa.PrivateKey: + b, _ := x509.MarshalECPrivateKey(k) + return &pem.Block{Type: "EC PRIVATE KEY", Bytes: b} + default: + // attempt PKCS#8 format for all other keys + b, err := x509.MarshalPKCS8PrivateKey(k) + if err != nil { + return nil + } + return &pem.Block{Type: "PRIVATE KEY", Bytes: b} + } +} + +func parsePrivateKeyPEM(pemBlock string) (crypto.PrivateKey, error) { + block, _ := pem.Decode([]byte(pemBlock)) + if block == nil { + return nil, errors.New("no PEM data in input") + } + + if block.Type == "PRIVATE KEY" { + priv, err := x509.ParsePKCS8PrivateKey(block.Bytes) + if err != nil { + return nil, fmt.Errorf("decoding PEM as PKCS#8: %s", err) + } + return priv, nil + } else if !strings.HasSuffix(block.Type, " PRIVATE KEY") { + return nil, fmt.Errorf("no private key data in PEM block of type %s", block.Type) + } + + switch block.Type[:len(block.Type)-12] { // strip " PRIVATE KEY" + case "RSA": + priv, err := x509.ParsePKCS1PrivateKey(block.Bytes) + if err != nil { + return nil, fmt.Errorf("parsing RSA private key from PEM: %s", err) + } + return priv, nil + case "EC": + priv, err := x509.ParseECPrivateKey(block.Bytes) + if err != nil { + return nil, fmt.Errorf("parsing EC private key from PEM: %s", err) + } + return priv, nil + case "DSA": + var k DSAKeyFormat + _, err := asn1.Unmarshal(block.Bytes, &k) + if err != nil { + return nil, fmt.Errorf("parsing DSA private key from PEM: %s", err) + } + priv := &dsa.PrivateKey{ + PublicKey: dsa.PublicKey{ + Parameters: dsa.Parameters{ + P: 
k.P, Q: k.Q, G: k.G, + }, + Y: k.Y, + }, + X: k.X, + } + return priv, nil + default: + return nil, fmt.Errorf("invalid private key type %s", block.Type) + } +} + +func getPublicKey(priv crypto.PrivateKey) (crypto.PublicKey, error) { + switch k := priv.(type) { + case interface{ Public() crypto.PublicKey }: + return k.Public(), nil + case *dsa.PrivateKey: + return &k.PublicKey, nil + default: + return nil, fmt.Errorf("unable to get public key for type %T", priv) + } +} + +type certificate struct { + Cert string + Key string +} + +func buildCustomCertificate(b64cert string, b64key string) (certificate, error) { + crt := certificate{} + + cert, err := base64.StdEncoding.DecodeString(b64cert) + if err != nil { + return crt, errors.New("unable to decode base64 certificate") + } + + key, err := base64.StdEncoding.DecodeString(b64key) + if err != nil { + return crt, errors.New("unable to decode base64 private key") + } + + decodedCert, _ := pem.Decode(cert) + if decodedCert == nil { + return crt, errors.New("unable to decode certificate") + } + _, err = x509.ParseCertificate(decodedCert.Bytes) + if err != nil { + return crt, fmt.Errorf( + "error parsing certificate: decodedCert.Bytes: %s", + err, + ) + } + + _, err = parsePrivateKeyPEM(string(key)) + if err != nil { + return crt, fmt.Errorf( + "error parsing private key: %s", + err, + ) + } + + crt.Cert = string(cert) + crt.Key = string(key) + + return crt, nil +} + +func generateCertificateAuthority( + cn string, + daysValid int, +) (certificate, error) { + priv, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return certificate{}, fmt.Errorf("error generating rsa key: %s", err) + } + + return generateCertificateAuthorityWithKeyInternal(cn, daysValid, priv) +} + +func generateCertificateAuthorityWithPEMKey( + cn string, + daysValid int, + privPEM string, +) (certificate, error) { + priv, err := parsePrivateKeyPEM(privPEM) + if err != nil { + return certificate{}, fmt.Errorf("parsing private key: %s", err) + 
} + return generateCertificateAuthorityWithKeyInternal(cn, daysValid, priv) +} + +func generateCertificateAuthorityWithKeyInternal( + cn string, + daysValid int, + priv crypto.PrivateKey, +) (certificate, error) { + ca := certificate{} + + template, err := getBaseCertTemplate(cn, nil, nil, daysValid) + if err != nil { + return ca, err + } + // Override KeyUsage and IsCA + template.KeyUsage = x509.KeyUsageKeyEncipherment | + x509.KeyUsageDigitalSignature | + x509.KeyUsageCertSign + template.IsCA = true + + ca.Cert, ca.Key, err = getCertAndKey(template, priv, template, priv) + + return ca, err +} + +func generateSelfSignedCertificate( + cn string, + ips []interface{}, + alternateDNS []interface{}, + daysValid int, +) (certificate, error) { + priv, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return certificate{}, fmt.Errorf("error generating rsa key: %s", err) + } + return generateSelfSignedCertificateWithKeyInternal(cn, ips, alternateDNS, daysValid, priv) +} + +func generateSelfSignedCertificateWithPEMKey( + cn string, + ips []interface{}, + alternateDNS []interface{}, + daysValid int, + privPEM string, +) (certificate, error) { + priv, err := parsePrivateKeyPEM(privPEM) + if err != nil { + return certificate{}, fmt.Errorf("parsing private key: %s", err) + } + return generateSelfSignedCertificateWithKeyInternal(cn, ips, alternateDNS, daysValid, priv) +} + +func generateSelfSignedCertificateWithKeyInternal( + cn string, + ips []interface{}, + alternateDNS []interface{}, + daysValid int, + priv crypto.PrivateKey, +) (certificate, error) { + cert := certificate{} + + template, err := getBaseCertTemplate(cn, ips, alternateDNS, daysValid) + if err != nil { + return cert, err + } + + cert.Cert, cert.Key, err = getCertAndKey(template, priv, template, priv) + + return cert, err +} + +func generateSignedCertificate( + cn string, + ips []interface{}, + alternateDNS []interface{}, + daysValid int, + ca certificate, +) (certificate, error) { + priv, err := 
rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return certificate{}, fmt.Errorf("error generating rsa key: %s", err) + } + return generateSignedCertificateWithKeyInternal(cn, ips, alternateDNS, daysValid, ca, priv) +} + +func generateSignedCertificateWithPEMKey( + cn string, + ips []interface{}, + alternateDNS []interface{}, + daysValid int, + ca certificate, + privPEM string, +) (certificate, error) { + priv, err := parsePrivateKeyPEM(privPEM) + if err != nil { + return certificate{}, fmt.Errorf("parsing private key: %s", err) + } + return generateSignedCertificateWithKeyInternal(cn, ips, alternateDNS, daysValid, ca, priv) +} + +func generateSignedCertificateWithKeyInternal( + cn string, + ips []interface{}, + alternateDNS []interface{}, + daysValid int, + ca certificate, + priv crypto.PrivateKey, +) (certificate, error) { + cert := certificate{} + + decodedSignerCert, _ := pem.Decode([]byte(ca.Cert)) + if decodedSignerCert == nil { + return cert, errors.New("unable to decode certificate") + } + signerCert, err := x509.ParseCertificate(decodedSignerCert.Bytes) + if err != nil { + return cert, fmt.Errorf( + "error parsing certificate: decodedSignerCert.Bytes: %s", + err, + ) + } + signerKey, err := parsePrivateKeyPEM(ca.Key) + if err != nil { + return cert, fmt.Errorf( + "error parsing private key: %s", + err, + ) + } + + template, err := getBaseCertTemplate(cn, ips, alternateDNS, daysValid) + if err != nil { + return cert, err + } + + cert.Cert, cert.Key, err = getCertAndKey( + template, + priv, + signerCert, + signerKey, + ) + + return cert, err +} + +func getCertAndKey( + template *x509.Certificate, + signeeKey crypto.PrivateKey, + parent *x509.Certificate, + signingKey crypto.PrivateKey, +) (string, string, error) { + signeePubKey, err := getPublicKey(signeeKey) + if err != nil { + return "", "", fmt.Errorf("error retrieving public key from signee key: %s", err) + } + derBytes, err := x509.CreateCertificate( + rand.Reader, + template, + parent, + 
signeePubKey, + signingKey, + ) + if err != nil { + return "", "", fmt.Errorf("error creating certificate: %s", err) + } + + certBuffer := bytes.Buffer{} + if err := pem.Encode( + &certBuffer, + &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}, + ); err != nil { + return "", "", fmt.Errorf("error pem-encoding certificate: %s", err) + } + + keyBuffer := bytes.Buffer{} + if err := pem.Encode( + &keyBuffer, + pemBlockForKey(signeeKey), + ); err != nil { + return "", "", fmt.Errorf("error pem-encoding key: %s", err) + } + + return certBuffer.String(), keyBuffer.String(), nil +} + +func getBaseCertTemplate( + cn string, + ips []interface{}, + alternateDNS []interface{}, + daysValid int, +) (*x509.Certificate, error) { + ipAddresses, err := getNetIPs(ips) + if err != nil { + return nil, err + } + dnsNames, err := getAlternateDNSStrs(alternateDNS) + if err != nil { + return nil, err + } + serialNumberUpperBound := new(big.Int).Lsh(big.NewInt(1), 128) + serialNumber, err := rand.Int(rand.Reader, serialNumberUpperBound) + if err != nil { + return nil, err + } + return &x509.Certificate{ + SerialNumber: serialNumber, + Subject: pkix.Name{ + CommonName: cn, + }, + IPAddresses: ipAddresses, + DNSNames: dnsNames, + NotBefore: time.Now(), + NotAfter: time.Now().Add(time.Hour * 24 * time.Duration(daysValid)), + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{ + x509.ExtKeyUsageServerAuth, + x509.ExtKeyUsageClientAuth, + }, + BasicConstraintsValid: true, + }, nil +} + +func getNetIPs(ips []interface{}) ([]net.IP, error) { + if ips == nil { + return []net.IP{}, nil + } + var ipStr string + var ok bool + var netIP net.IP + netIPs := make([]net.IP, len(ips)) + for i, ip := range ips { + ipStr, ok = ip.(string) + if !ok { + return nil, fmt.Errorf("error parsing ip: %v is not a string", ip) + } + netIP = net.ParseIP(ipStr) + if netIP == nil { + return nil, fmt.Errorf("error parsing ip: %s", ipStr) + } + netIPs[i] = netIP + } + 
return netIPs, nil +} + +func getAlternateDNSStrs(alternateDNS []interface{}) ([]string, error) { + if alternateDNS == nil { + return []string{}, nil + } + var dnsStr string + var ok bool + alternateDNSStrs := make([]string, len(alternateDNS)) + for i, dns := range alternateDNS { + dnsStr, ok = dns.(string) + if !ok { + return nil, fmt.Errorf( + "error processing alternate dns name: %v is not a string", + dns, + ) + } + alternateDNSStrs[i] = dnsStr + } + return alternateDNSStrs, nil +} + +func encryptAES(password string, plaintext string) (string, error) { + if plaintext == "" { + return "", nil + } + + key := make([]byte, 32) + copy(key, []byte(password)) + block, err := aes.NewCipher(key) + if err != nil { + return "", err + } + + content := []byte(plaintext) + blockSize := block.BlockSize() + padding := blockSize - len(content)%blockSize + padtext := bytes.Repeat([]byte{byte(padding)}, padding) + content = append(content, padtext...) + + ciphertext := make([]byte, aes.BlockSize+len(content)) + + iv := ciphertext[:aes.BlockSize] + if _, err := io.ReadFull(rand.Reader, iv); err != nil { + return "", err + } + + mode := cipher.NewCBCEncrypter(block, iv) + mode.CryptBlocks(ciphertext[aes.BlockSize:], content) + + return base64.StdEncoding.EncodeToString(ciphertext), nil +} + +func decryptAES(password string, crypt64 string) (string, error) { + if crypt64 == "" { + return "", nil + } + + key := make([]byte, 32) + copy(key, []byte(password)) + + crypt, err := base64.StdEncoding.DecodeString(crypt64) + if err != nil { + return "", err + } + + block, err := aes.NewCipher(key) + if err != nil { + return "", err + } + + iv := crypt[:aes.BlockSize] + crypt = crypt[aes.BlockSize:] + decrypted := make([]byte, len(crypt)) + mode := cipher.NewCBCDecrypter(block, iv) + mode.CryptBlocks(decrypted, crypt) + + return string(decrypted[:len(decrypted)-int(decrypted[len(decrypted)-1])]), nil +} diff --git a/vendor/github.com/Masterminds/sprig/v3/date.go 
b/vendor/github.com/Masterminds/sprig/v3/date.go new file mode 100644 index 000000000..ed022ddac --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/date.go @@ -0,0 +1,152 @@ +package sprig + +import ( + "strconv" + "time" +) + +// Given a format and a date, format the date string. +// +// Date can be a `time.Time` or an `int, int32, int64`. +// In the later case, it is treated as seconds since UNIX +// epoch. +func date(fmt string, date interface{}) string { + return dateInZone(fmt, date, "Local") +} + +func htmlDate(date interface{}) string { + return dateInZone("2006-01-02", date, "Local") +} + +func htmlDateInZone(date interface{}, zone string) string { + return dateInZone("2006-01-02", date, zone) +} + +func dateInZone(fmt string, date interface{}, zone string) string { + var t time.Time + switch date := date.(type) { + default: + t = time.Now() + case time.Time: + t = date + case *time.Time: + t = *date + case int64: + t = time.Unix(date, 0) + case int: + t = time.Unix(int64(date), 0) + case int32: + t = time.Unix(int64(date), 0) + } + + loc, err := time.LoadLocation(zone) + if err != nil { + loc, _ = time.LoadLocation("UTC") + } + + return t.In(loc).Format(fmt) +} + +func dateModify(fmt string, date time.Time) time.Time { + d, err := time.ParseDuration(fmt) + if err != nil { + return date + } + return date.Add(d) +} + +func mustDateModify(fmt string, date time.Time) (time.Time, error) { + d, err := time.ParseDuration(fmt) + if err != nil { + return time.Time{}, err + } + return date.Add(d), nil +} + +func dateAgo(date interface{}) string { + var t time.Time + + switch date := date.(type) { + default: + t = time.Now() + case time.Time: + t = date + case int64: + t = time.Unix(date, 0) + case int: + t = time.Unix(int64(date), 0) + } + // Drop resolution to seconds + duration := time.Since(t).Round(time.Second) + return duration.String() +} + +func duration(sec interface{}) string { + var n int64 + switch value := sec.(type) { + default: + n = 0 + case 
string: + n, _ = strconv.ParseInt(value, 10, 64) + case int64: + n = value + } + return (time.Duration(n) * time.Second).String() +} + +func durationRound(duration interface{}) string { + var d time.Duration + switch duration := duration.(type) { + default: + d = 0 + case string: + d, _ = time.ParseDuration(duration) + case int64: + d = time.Duration(duration) + case time.Time: + d = time.Since(duration) + } + + u := uint64(d) + neg := d < 0 + if neg { + u = -u + } + + var ( + year = uint64(time.Hour) * 24 * 365 + month = uint64(time.Hour) * 24 * 30 + day = uint64(time.Hour) * 24 + hour = uint64(time.Hour) + minute = uint64(time.Minute) + second = uint64(time.Second) + ) + switch { + case u > year: + return strconv.FormatUint(u/year, 10) + "y" + case u > month: + return strconv.FormatUint(u/month, 10) + "mo" + case u > day: + return strconv.FormatUint(u/day, 10) + "d" + case u > hour: + return strconv.FormatUint(u/hour, 10) + "h" + case u > minute: + return strconv.FormatUint(u/minute, 10) + "m" + case u > second: + return strconv.FormatUint(u/second, 10) + "s" + } + return "0s" +} + +func toDate(fmt, str string) time.Time { + t, _ := time.ParseInLocation(fmt, str, time.Local) + return t +} + +func mustToDate(fmt, str string) (time.Time, error) { + return time.ParseInLocation(fmt, str, time.Local) +} + +func unixEpoch(date time.Time) string { + return strconv.FormatInt(date.Unix(), 10) +} diff --git a/vendor/github.com/Masterminds/sprig/v3/defaults.go b/vendor/github.com/Masterminds/sprig/v3/defaults.go new file mode 100644 index 000000000..b9f979666 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/defaults.go @@ -0,0 +1,163 @@ +package sprig + +import ( + "bytes" + "encoding/json" + "math/rand" + "reflect" + "strings" + "time" +) + +func init() { + rand.Seed(time.Now().UnixNano()) +} + +// dfault checks whether `given` is set, and returns default if not set. +// +// This returns `d` if `given` appears not to be set, and `given` otherwise. 
+// +// For numeric types 0 is unset. +// For strings, maps, arrays, and slices, len() = 0 is considered unset. +// For bool, false is unset. +// Structs are never considered unset. +// +// For everything else, including pointers, a nil value is unset. +func dfault(d interface{}, given ...interface{}) interface{} { + + if empty(given) || empty(given[0]) { + return d + } + return given[0] +} + +// empty returns true if the given value has the zero value for its type. +func empty(given interface{}) bool { + g := reflect.ValueOf(given) + if !g.IsValid() { + return true + } + + // Basically adapted from text/template.isTrue + switch g.Kind() { + default: + return g.IsNil() + case reflect.Array, reflect.Slice, reflect.Map, reflect.String: + return g.Len() == 0 + case reflect.Bool: + return !g.Bool() + case reflect.Complex64, reflect.Complex128: + return g.Complex() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return g.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return g.Uint() == 0 + case reflect.Float32, reflect.Float64: + return g.Float() == 0 + case reflect.Struct: + return false + } +} + +// coalesce returns the first non-empty value. +func coalesce(v ...interface{}) interface{} { + for _, val := range v { + if !empty(val) { + return val + } + } + return nil +} + +// all returns true if empty(x) is false for all values x in the list. +// If the list is empty, return true. +func all(v ...interface{}) bool { + for _, val := range v { + if empty(val) { + return false + } + } + return true +} + +// any returns true if empty(x) is false for any x in the list. +// If the list is empty, return false. +func any(v ...interface{}) bool { + for _, val := range v { + if !empty(val) { + return true + } + } + return false +} + +// fromJson decodes JSON into a structured value, ignoring errors. 
+func fromJson(v string) interface{} { + output, _ := mustFromJson(v) + return output +} + +// mustFromJson decodes JSON into a structured value, returning errors. +func mustFromJson(v string) (interface{}, error) { + var output interface{} + err := json.Unmarshal([]byte(v), &output) + return output, err +} + +// toJson encodes an item into a JSON string +func toJson(v interface{}) string { + output, _ := json.Marshal(v) + return string(output) +} + +func mustToJson(v interface{}) (string, error) { + output, err := json.Marshal(v) + if err != nil { + return "", err + } + return string(output), nil +} + +// toPrettyJson encodes an item into a pretty (indented) JSON string +func toPrettyJson(v interface{}) string { + output, _ := json.MarshalIndent(v, "", " ") + return string(output) +} + +func mustToPrettyJson(v interface{}) (string, error) { + output, err := json.MarshalIndent(v, "", " ") + if err != nil { + return "", err + } + return string(output), nil +} + +// toRawJson encodes an item into a JSON string with no escaping of HTML characters. +func toRawJson(v interface{}) string { + output, err := mustToRawJson(v) + if err != nil { + panic(err) + } + return string(output) +} + +// mustToRawJson encodes an item into a JSON string with no escaping of HTML characters. +func mustToRawJson(v interface{}) (string, error) { + buf := new(bytes.Buffer) + enc := json.NewEncoder(buf) + enc.SetEscapeHTML(false) + err := enc.Encode(&v) + if err != nil { + return "", err + } + return strings.TrimSuffix(buf.String(), "\n"), nil +} + +// ternary returns the first value if the last value is true, otherwise returns the second value. 
+func ternary(vt interface{}, vf interface{}, v bool) interface{} { + if v { + return vt + } + + return vf +} diff --git a/vendor/github.com/Masterminds/sprig/v3/dict.go b/vendor/github.com/Masterminds/sprig/v3/dict.go new file mode 100644 index 000000000..ade889698 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/dict.go @@ -0,0 +1,174 @@ +package sprig + +import ( + "github.com/imdario/mergo" + "github.com/mitchellh/copystructure" +) + +func get(d map[string]interface{}, key string) interface{} { + if val, ok := d[key]; ok { + return val + } + return "" +} + +func set(d map[string]interface{}, key string, value interface{}) map[string]interface{} { + d[key] = value + return d +} + +func unset(d map[string]interface{}, key string) map[string]interface{} { + delete(d, key) + return d +} + +func hasKey(d map[string]interface{}, key string) bool { + _, ok := d[key] + return ok +} + +func pluck(key string, d ...map[string]interface{}) []interface{} { + res := []interface{}{} + for _, dict := range d { + if val, ok := dict[key]; ok { + res = append(res, val) + } + } + return res +} + +func keys(dicts ...map[string]interface{}) []string { + k := []string{} + for _, dict := range dicts { + for key := range dict { + k = append(k, key) + } + } + return k +} + +func pick(dict map[string]interface{}, keys ...string) map[string]interface{} { + res := map[string]interface{}{} + for _, k := range keys { + if v, ok := dict[k]; ok { + res[k] = v + } + } + return res +} + +func omit(dict map[string]interface{}, keys ...string) map[string]interface{} { + res := map[string]interface{}{} + + omit := make(map[string]bool, len(keys)) + for _, k := range keys { + omit[k] = true + } + + for k, v := range dict { + if _, ok := omit[k]; !ok { + res[k] = v + } + } + return res +} + +func dict(v ...interface{}) map[string]interface{} { + dict := map[string]interface{}{} + lenv := len(v) + for i := 0; i < lenv; i += 2 { + key := strval(v[i]) + if i+1 >= lenv { + dict[key] = "" + 
continue + } + dict[key] = v[i+1] + } + return dict +} + +func merge(dst map[string]interface{}, srcs ...map[string]interface{}) interface{} { + for _, src := range srcs { + if err := mergo.Merge(&dst, src); err != nil { + // Swallow errors inside of a template. + return "" + } + } + return dst +} + +func mustMerge(dst map[string]interface{}, srcs ...map[string]interface{}) (interface{}, error) { + for _, src := range srcs { + if err := mergo.Merge(&dst, src); err != nil { + return nil, err + } + } + return dst, nil +} + +func mergeOverwrite(dst map[string]interface{}, srcs ...map[string]interface{}) interface{} { + for _, src := range srcs { + if err := mergo.MergeWithOverwrite(&dst, src); err != nil { + // Swallow errors inside of a template. + return "" + } + } + return dst +} + +func mustMergeOverwrite(dst map[string]interface{}, srcs ...map[string]interface{}) (interface{}, error) { + for _, src := range srcs { + if err := mergo.MergeWithOverwrite(&dst, src); err != nil { + return nil, err + } + } + return dst, nil +} + +func values(dict map[string]interface{}) []interface{} { + values := []interface{}{} + for _, value := range dict { + values = append(values, value) + } + + return values +} + +func deepCopy(i interface{}) interface{} { + c, err := mustDeepCopy(i) + if err != nil { + panic("deepCopy error: " + err.Error()) + } + + return c +} + +func mustDeepCopy(i interface{}) (interface{}, error) { + return copystructure.Copy(i) +} + +func dig(ps ...interface{}) (interface{}, error) { + if len(ps) < 3 { + panic("dig needs at least three arguments") + } + dict := ps[len(ps)-1].(map[string]interface{}) + def := ps[len(ps)-2] + ks := make([]string, len(ps)-2) + for i := 0; i < len(ks); i++ { + ks[i] = ps[i].(string) + } + + return digFromDict(dict, def, ks) +} + +func digFromDict(dict map[string]interface{}, d interface{}, ks []string) (interface{}, error) { + k, ns := ks[0], ks[1:len(ks)] + step, has := dict[k] + if !has { + return d, nil + } + if len(ns) == 0 
{ + return step, nil + } + return digFromDict(step.(map[string]interface{}), d, ns) +} diff --git a/vendor/github.com/Masterminds/sprig/v3/doc.go b/vendor/github.com/Masterminds/sprig/v3/doc.go new file mode 100644 index 000000000..aabb9d448 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/doc.go @@ -0,0 +1,19 @@ +/* +Package sprig provides template functions for Go. + +This package contains a number of utility functions for working with data +inside of Go `html/template` and `text/template` files. + +To add these functions, use the `template.Funcs()` method: + + t := templates.New("foo").Funcs(sprig.FuncMap()) + +Note that you should add the function map before you parse any template files. + + In several cases, Sprig reverses the order of arguments from the way they + appear in the standard library. This is to make it easier to pipe + arguments into functions. + +See http://masterminds.github.io/sprig/ for more detailed documentation on each of the available functions. +*/ +package sprig diff --git a/vendor/github.com/Masterminds/sprig/v3/functions.go b/vendor/github.com/Masterminds/sprig/v3/functions.go new file mode 100644 index 000000000..57fcec1d9 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/functions.go @@ -0,0 +1,382 @@ +package sprig + +import ( + "errors" + "html/template" + "math/rand" + "os" + "path" + "path/filepath" + "reflect" + "strconv" + "strings" + ttemplate "text/template" + "time" + + util "github.com/Masterminds/goutils" + "github.com/huandu/xstrings" + "github.com/shopspring/decimal" +) + +// FuncMap produces the function map. +// +// Use this to pass the functions into the template engine: +// +// tpl := template.New("foo").Funcs(sprig.FuncMap())) +// +func FuncMap() template.FuncMap { + return HtmlFuncMap() +} + +// HermeticTxtFuncMap returns a 'text/template'.FuncMap with only repeatable functions. 
+func HermeticTxtFuncMap() ttemplate.FuncMap { + r := TxtFuncMap() + for _, name := range nonhermeticFunctions { + delete(r, name) + } + return r +} + +// HermeticHtmlFuncMap returns an 'html/template'.Funcmap with only repeatable functions. +func HermeticHtmlFuncMap() template.FuncMap { + r := HtmlFuncMap() + for _, name := range nonhermeticFunctions { + delete(r, name) + } + return r +} + +// TxtFuncMap returns a 'text/template'.FuncMap +func TxtFuncMap() ttemplate.FuncMap { + return ttemplate.FuncMap(GenericFuncMap()) +} + +// HtmlFuncMap returns an 'html/template'.Funcmap +func HtmlFuncMap() template.FuncMap { + return template.FuncMap(GenericFuncMap()) +} + +// GenericFuncMap returns a copy of the basic function map as a map[string]interface{}. +func GenericFuncMap() map[string]interface{} { + gfm := make(map[string]interface{}, len(genericMap)) + for k, v := range genericMap { + gfm[k] = v + } + return gfm +} + +// These functions are not guaranteed to evaluate to the same result for given input, because they +// refer to the environment or global state. +var nonhermeticFunctions = []string{ + // Date functions + "date", + "date_in_zone", + "date_modify", + "now", + "htmlDate", + "htmlDateInZone", + "dateInZone", + "dateModify", + + // Strings + "randAlphaNum", + "randAlpha", + "randAscii", + "randNumeric", + "randBytes", + "uuidv4", + + // OS + "env", + "expandenv", + + // Network + "getHostByName", +} + +var genericMap = map[string]interface{}{ + "hello": func() string { return "Hello!" 
}, + + // Date functions + "ago": dateAgo, + "date": date, + "date_in_zone": dateInZone, + "date_modify": dateModify, + "dateInZone": dateInZone, + "dateModify": dateModify, + "duration": duration, + "durationRound": durationRound, + "htmlDate": htmlDate, + "htmlDateInZone": htmlDateInZone, + "must_date_modify": mustDateModify, + "mustDateModify": mustDateModify, + "mustToDate": mustToDate, + "now": time.Now, + "toDate": toDate, + "unixEpoch": unixEpoch, + + // Strings + "abbrev": abbrev, + "abbrevboth": abbrevboth, + "trunc": trunc, + "trim": strings.TrimSpace, + "upper": strings.ToUpper, + "lower": strings.ToLower, + "title": strings.Title, + "untitle": untitle, + "substr": substring, + // Switch order so that "foo" | repeat 5 + "repeat": func(count int, str string) string { return strings.Repeat(str, count) }, + // Deprecated: Use trimAll. + "trimall": func(a, b string) string { return strings.Trim(b, a) }, + // Switch order so that "$foo" | trimall "$" + "trimAll": func(a, b string) string { return strings.Trim(b, a) }, + "trimSuffix": func(a, b string) string { return strings.TrimSuffix(b, a) }, + "trimPrefix": func(a, b string) string { return strings.TrimPrefix(b, a) }, + "nospace": util.DeleteWhiteSpace, + "initials": initials, + "randAlphaNum": randAlphaNumeric, + "randAlpha": randAlpha, + "randAscii": randAscii, + "randNumeric": randNumeric, + "swapcase": util.SwapCase, + "shuffle": xstrings.Shuffle, + "snakecase": xstrings.ToSnakeCase, + "camelcase": xstrings.ToCamelCase, + "kebabcase": xstrings.ToKebabCase, + "wrap": func(l int, s string) string { return util.Wrap(s, l) }, + "wrapWith": func(l int, sep, str string) string { return util.WrapCustom(str, l, sep, true) }, + // Switch order so that "foobar" | contains "foo" + "contains": func(substr string, str string) bool { return strings.Contains(str, substr) }, + "hasPrefix": func(substr string, str string) bool { return strings.HasPrefix(str, substr) }, + "hasSuffix": func(substr string, str string) 
bool { return strings.HasSuffix(str, substr) }, + "quote": quote, + "squote": squote, + "cat": cat, + "indent": indent, + "nindent": nindent, + "replace": replace, + "plural": plural, + "sha1sum": sha1sum, + "sha256sum": sha256sum, + "adler32sum": adler32sum, + "toString": strval, + + // Wrap Atoi to stop errors. + "atoi": func(a string) int { i, _ := strconv.Atoi(a); return i }, + "int64": toInt64, + "int": toInt, + "float64": toFloat64, + "seq": seq, + "toDecimal": toDecimal, + + //"gt": func(a, b int) bool {return a > b}, + //"gte": func(a, b int) bool {return a >= b}, + //"lt": func(a, b int) bool {return a < b}, + //"lte": func(a, b int) bool {return a <= b}, + + // split "/" foo/bar returns map[int]string{0: foo, 1: bar} + "split": split, + "splitList": func(sep, orig string) []string { return strings.Split(orig, sep) }, + // splitn "/" foo/bar/fuu returns map[int]string{0: foo, 1: bar/fuu} + "splitn": splitn, + "toStrings": strslice, + + "until": until, + "untilStep": untilStep, + + // VERY basic arithmetic. 
+ "add1": func(i interface{}) int64 { return toInt64(i) + 1 }, + "add": func(i ...interface{}) int64 { + var a int64 = 0 + for _, b := range i { + a += toInt64(b) + } + return a + }, + "sub": func(a, b interface{}) int64 { return toInt64(a) - toInt64(b) }, + "div": func(a, b interface{}) int64 { return toInt64(a) / toInt64(b) }, + "mod": func(a, b interface{}) int64 { return toInt64(a) % toInt64(b) }, + "mul": func(a interface{}, v ...interface{}) int64 { + val := toInt64(a) + for _, b := range v { + val = val * toInt64(b) + } + return val + }, + "randInt": func(min, max int) int { return rand.Intn(max-min) + min }, + "add1f": func(i interface{}) float64 { + return execDecimalOp(i, []interface{}{1}, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Add(d2) }) + }, + "addf": func(i ...interface{}) float64 { + a := interface{}(float64(0)) + return execDecimalOp(a, i, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Add(d2) }) + }, + "subf": func(a interface{}, v ...interface{}) float64 { + return execDecimalOp(a, v, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Sub(d2) }) + }, + "divf": func(a interface{}, v ...interface{}) float64 { + return execDecimalOp(a, v, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Div(d2) }) + }, + "mulf": func(a interface{}, v ...interface{}) float64 { + return execDecimalOp(a, v, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Mul(d2) }) + }, + "biggest": max, + "max": max, + "min": min, + "maxf": maxf, + "minf": minf, + "ceil": ceil, + "floor": floor, + "round": round, + + // string slices. Note that we reverse the order b/c that's better + // for template processing. 
+ "join": join, + "sortAlpha": sortAlpha, + + // Defaults + "default": dfault, + "empty": empty, + "coalesce": coalesce, + "all": all, + "any": any, + "compact": compact, + "mustCompact": mustCompact, + "fromJson": fromJson, + "toJson": toJson, + "toPrettyJson": toPrettyJson, + "toRawJson": toRawJson, + "mustFromJson": mustFromJson, + "mustToJson": mustToJson, + "mustToPrettyJson": mustToPrettyJson, + "mustToRawJson": mustToRawJson, + "ternary": ternary, + "deepCopy": deepCopy, + "mustDeepCopy": mustDeepCopy, + + // Reflection + "typeOf": typeOf, + "typeIs": typeIs, + "typeIsLike": typeIsLike, + "kindOf": kindOf, + "kindIs": kindIs, + "deepEqual": reflect.DeepEqual, + + // OS: + "env": os.Getenv, + "expandenv": os.ExpandEnv, + + // Network: + "getHostByName": getHostByName, + + // Paths: + "base": path.Base, + "dir": path.Dir, + "clean": path.Clean, + "ext": path.Ext, + "isAbs": path.IsAbs, + + // Filepaths: + "osBase": filepath.Base, + "osClean": filepath.Clean, + "osDir": filepath.Dir, + "osExt": filepath.Ext, + "osIsAbs": filepath.IsAbs, + + // Encoding: + "b64enc": base64encode, + "b64dec": base64decode, + "b32enc": base32encode, + "b32dec": base32decode, + + // Data Structures: + "tuple": list, // FIXME: with the addition of append/prepend these are no longer immutable. 
+ "list": list, + "dict": dict, + "get": get, + "set": set, + "unset": unset, + "hasKey": hasKey, + "pluck": pluck, + "keys": keys, + "pick": pick, + "omit": omit, + "merge": merge, + "mergeOverwrite": mergeOverwrite, + "mustMerge": mustMerge, + "mustMergeOverwrite": mustMergeOverwrite, + "values": values, + + "append": push, "push": push, + "mustAppend": mustPush, "mustPush": mustPush, + "prepend": prepend, + "mustPrepend": mustPrepend, + "first": first, + "mustFirst": mustFirst, + "rest": rest, + "mustRest": mustRest, + "last": last, + "mustLast": mustLast, + "initial": initial, + "mustInitial": mustInitial, + "reverse": reverse, + "mustReverse": mustReverse, + "uniq": uniq, + "mustUniq": mustUniq, + "without": without, + "mustWithout": mustWithout, + "has": has, + "mustHas": mustHas, + "slice": slice, + "mustSlice": mustSlice, + "concat": concat, + "dig": dig, + "chunk": chunk, + "mustChunk": mustChunk, + + // Crypto: + "bcrypt": bcrypt, + "htpasswd": htpasswd, + "genPrivateKey": generatePrivateKey, + "derivePassword": derivePassword, + "buildCustomCert": buildCustomCertificate, + "genCA": generateCertificateAuthority, + "genCAWithKey": generateCertificateAuthorityWithPEMKey, + "genSelfSignedCert": generateSelfSignedCertificate, + "genSelfSignedCertWithKey": generateSelfSignedCertificateWithPEMKey, + "genSignedCert": generateSignedCertificate, + "genSignedCertWithKey": generateSignedCertificateWithPEMKey, + "encryptAES": encryptAES, + "decryptAES": decryptAES, + "randBytes": randBytes, + + // UUIDs: + "uuidv4": uuidv4, + + // SemVer: + "semver": semver, + "semverCompare": semverCompare, + + // Flow Control: + "fail": func(msg string) (string, error) { return "", errors.New(msg) }, + + // Regex + "regexMatch": regexMatch, + "mustRegexMatch": mustRegexMatch, + "regexFindAll": regexFindAll, + "mustRegexFindAll": mustRegexFindAll, + "regexFind": regexFind, + "mustRegexFind": mustRegexFind, + "regexReplaceAll": regexReplaceAll, + "mustRegexReplaceAll": 
mustRegexReplaceAll, + "regexReplaceAllLiteral": regexReplaceAllLiteral, + "mustRegexReplaceAllLiteral": mustRegexReplaceAllLiteral, + "regexSplit": regexSplit, + "mustRegexSplit": mustRegexSplit, + "regexQuoteMeta": regexQuoteMeta, + + // URLs: + "urlParse": urlParse, + "urlJoin": urlJoin, +} diff --git a/vendor/github.com/Masterminds/sprig/v3/list.go b/vendor/github.com/Masterminds/sprig/v3/list.go new file mode 100644 index 000000000..ca0fbb789 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/list.go @@ -0,0 +1,464 @@ +package sprig + +import ( + "fmt" + "math" + "reflect" + "sort" +) + +// Reflection is used in these functions so that slices and arrays of strings, +// ints, and other types not implementing []interface{} can be worked with. +// For example, this is useful if you need to work on the output of regexs. + +func list(v ...interface{}) []interface{} { + return v +} + +func push(list interface{}, v interface{}) []interface{} { + l, err := mustPush(list, v) + if err != nil { + panic(err) + } + + return l +} + +func mustPush(list interface{}, v interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + nl := make([]interface{}, l) + for i := 0; i < l; i++ { + nl[i] = l2.Index(i).Interface() + } + + return append(nl, v), nil + + default: + return nil, fmt.Errorf("Cannot push on type %s", tp) + } +} + +func prepend(list interface{}, v interface{}) []interface{} { + l, err := mustPrepend(list, v) + if err != nil { + panic(err) + } + + return l +} + +func mustPrepend(list interface{}, v interface{}) ([]interface{}, error) { + //return append([]interface{}{v}, list...) 
+ + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + nl := make([]interface{}, l) + for i := 0; i < l; i++ { + nl[i] = l2.Index(i).Interface() + } + + return append([]interface{}{v}, nl...), nil + + default: + return nil, fmt.Errorf("Cannot prepend on type %s", tp) + } +} + +func chunk(size int, list interface{}) [][]interface{} { + l, err := mustChunk(size, list) + if err != nil { + panic(err) + } + + return l +} + +func mustChunk(size int, list interface{}) ([][]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + + cs := int(math.Floor(float64(l-1)/float64(size)) + 1) + nl := make([][]interface{}, cs) + + for i := 0; i < cs; i++ { + clen := size + if i == cs-1 { + clen = int(math.Floor(math.Mod(float64(l), float64(size)))) + if clen == 0 { + clen = size + } + } + + nl[i] = make([]interface{}, clen) + + for j := 0; j < clen; j++ { + ix := i*size + j + nl[i][j] = l2.Index(ix).Interface() + } + } + + return nl, nil + + default: + return nil, fmt.Errorf("Cannot chunk type %s", tp) + } +} + +func last(list interface{}) interface{} { + l, err := mustLast(list) + if err != nil { + panic(err) + } + + return l +} + +func mustLast(list interface{}) (interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + return l2.Index(l - 1).Interface(), nil + default: + return nil, fmt.Errorf("Cannot find last on type %s", tp) + } +} + +func first(list interface{}) interface{} { + l, err := mustFirst(list) + if err != nil { + panic(err) + } + + return l +} + +func mustFirst(list interface{}) (interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 
{ + return nil, nil + } + + return l2.Index(0).Interface(), nil + default: + return nil, fmt.Errorf("Cannot find first on type %s", tp) + } +} + +func rest(list interface{}) []interface{} { + l, err := mustRest(list) + if err != nil { + panic(err) + } + + return l +} + +func mustRest(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + nl := make([]interface{}, l-1) + for i := 1; i < l; i++ { + nl[i-1] = l2.Index(i).Interface() + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot find rest on type %s", tp) + } +} + +func initial(list interface{}) []interface{} { + l, err := mustInitial(list) + if err != nil { + panic(err) + } + + return l +} + +func mustInitial(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + nl := make([]interface{}, l-1) + for i := 0; i < l-1; i++ { + nl[i] = l2.Index(i).Interface() + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot find initial on type %s", tp) + } +} + +func sortAlpha(list interface{}) []string { + k := reflect.Indirect(reflect.ValueOf(list)).Kind() + switch k { + case reflect.Slice, reflect.Array: + a := strslice(list) + s := sort.StringSlice(a) + s.Sort() + return s + } + return []string{strval(list)} +} + +func reverse(v interface{}) []interface{} { + l, err := mustReverse(v) + if err != nil { + panic(err) + } + + return l +} + +func mustReverse(v interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(v).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(v) + + l := l2.Len() + // We do not sort in place because the incoming array should not be altered. 
+ nl := make([]interface{}, l) + for i := 0; i < l; i++ { + nl[l-i-1] = l2.Index(i).Interface() + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot find reverse on type %s", tp) + } +} + +func compact(list interface{}) []interface{} { + l, err := mustCompact(list) + if err != nil { + panic(err) + } + + return l +} + +func mustCompact(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + nl := []interface{}{} + var item interface{} + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if !empty(item) { + nl = append(nl, item) + } + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot compact on type %s", tp) + } +} + +func uniq(list interface{}) []interface{} { + l, err := mustUniq(list) + if err != nil { + panic(err) + } + + return l +} + +func mustUniq(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + dest := []interface{}{} + var item interface{} + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if !inList(dest, item) { + dest = append(dest, item) + } + } + + return dest, nil + default: + return nil, fmt.Errorf("Cannot find uniq on type %s", tp) + } +} + +func inList(haystack []interface{}, needle interface{}) bool { + for _, h := range haystack { + if reflect.DeepEqual(needle, h) { + return true + } + } + return false +} + +func without(list interface{}, omit ...interface{}) []interface{} { + l, err := mustWithout(list, omit...) 
+ if err != nil { + panic(err) + } + + return l +} + +func mustWithout(list interface{}, omit ...interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + res := []interface{}{} + var item interface{} + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if !inList(omit, item) { + res = append(res, item) + } + } + + return res, nil + default: + return nil, fmt.Errorf("Cannot find without on type %s", tp) + } +} + +func has(needle interface{}, haystack interface{}) bool { + l, err := mustHas(needle, haystack) + if err != nil { + panic(err) + } + + return l +} + +func mustHas(needle interface{}, haystack interface{}) (bool, error) { + if haystack == nil { + return false, nil + } + tp := reflect.TypeOf(haystack).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(haystack) + var item interface{} + l := l2.Len() + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if reflect.DeepEqual(needle, item) { + return true, nil + } + } + + return false, nil + default: + return false, fmt.Errorf("Cannot find has on type %s", tp) + } +} + +// $list := [1, 2, 3, 4, 5] +// slice $list -> list[0:5] = list[:] +// slice $list 0 3 -> list[0:3] = list[:3] +// slice $list 3 5 -> list[3:5] +// slice $list 3 -> list[3:5] = list[3:] +func slice(list interface{}, indices ...interface{}) interface{} { + l, err := mustSlice(list, indices...) 
+ if err != nil { + panic(err) + } + + return l +} + +func mustSlice(list interface{}, indices ...interface{}) (interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + var start, end int + if len(indices) > 0 { + start = toInt(indices[0]) + } + if len(indices) < 2 { + end = l + } else { + end = toInt(indices[1]) + } + + return l2.Slice(start, end).Interface(), nil + default: + return nil, fmt.Errorf("list should be type of slice or array but %s", tp) + } +} + +func concat(lists ...interface{}) interface{} { + var res []interface{} + for _, list := range lists { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + for i := 0; i < l2.Len(); i++ { + res = append(res, l2.Index(i).Interface()) + } + default: + panic(fmt.Sprintf("Cannot concat type %s as list", tp)) + } + } + return res +} diff --git a/vendor/github.com/Masterminds/sprig/v3/network.go b/vendor/github.com/Masterminds/sprig/v3/network.go new file mode 100644 index 000000000..108d78a94 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/network.go @@ -0,0 +1,12 @@ +package sprig + +import ( + "math/rand" + "net" +) + +func getHostByName(name string) string { + addrs, _ := net.LookupHost(name) + //TODO: add error handing when release v3 comes out + return addrs[rand.Intn(len(addrs))] +} diff --git a/vendor/github.com/Masterminds/sprig/v3/numeric.go b/vendor/github.com/Masterminds/sprig/v3/numeric.go new file mode 100644 index 000000000..f68e4182e --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/numeric.go @@ -0,0 +1,186 @@ +package sprig + +import ( + "fmt" + "math" + "strconv" + "strings" + + "github.com/spf13/cast" + "github.com/shopspring/decimal" +) + +// toFloat64 converts 64-bit floats +func toFloat64(v interface{}) float64 { + return cast.ToFloat64(v) +} + +func toInt(v 
interface{}) int { + return cast.ToInt(v) +} + +// toInt64 converts integer types to 64-bit integers +func toInt64(v interface{}) int64 { + return cast.ToInt64(v) +} + +func max(a interface{}, i ...interface{}) int64 { + aa := toInt64(a) + for _, b := range i { + bb := toInt64(b) + if bb > aa { + aa = bb + } + } + return aa +} + +func maxf(a interface{}, i ...interface{}) float64 { + aa := toFloat64(a) + for _, b := range i { + bb := toFloat64(b) + aa = math.Max(aa, bb) + } + return aa +} + +func min(a interface{}, i ...interface{}) int64 { + aa := toInt64(a) + for _, b := range i { + bb := toInt64(b) + if bb < aa { + aa = bb + } + } + return aa +} + +func minf(a interface{}, i ...interface{}) float64 { + aa := toFloat64(a) + for _, b := range i { + bb := toFloat64(b) + aa = math.Min(aa, bb) + } + return aa +} + +func until(count int) []int { + step := 1 + if count < 0 { + step = -1 + } + return untilStep(0, count, step) +} + +func untilStep(start, stop, step int) []int { + v := []int{} + + if stop < start { + if step >= 0 { + return v + } + for i := start; i > stop; i += step { + v = append(v, i) + } + return v + } + + if step <= 0 { + return v + } + for i := start; i < stop; i += step { + v = append(v, i) + } + return v +} + +func floor(a interface{}) float64 { + aa := toFloat64(a) + return math.Floor(aa) +} + +func ceil(a interface{}) float64 { + aa := toFloat64(a) + return math.Ceil(aa) +} + +func round(a interface{}, p int, rOpt ...float64) float64 { + roundOn := .5 + if len(rOpt) > 0 { + roundOn = rOpt[0] + } + val := toFloat64(a) + places := toFloat64(p) + + var round float64 + pow := math.Pow(10, places) + digit := pow * val + _, div := math.Modf(digit) + if div >= roundOn { + round = math.Ceil(digit) + } else { + round = math.Floor(digit) + } + return round / pow +} + +// converts unix octal to decimal +func toDecimal(v interface{}) int64 { + result, err := strconv.ParseInt(fmt.Sprint(v), 8, 64) + if err != nil { + return 0 + } + return result +} + +func 
seq(params ...int) string { + increment := 1 + switch len(params) { + case 0: + return "" + case 1: + start := 1 + end := params[0] + if end < start { + increment = -1 + } + return intArrayToString(untilStep(start, end+increment, increment), " ") + case 3: + start := params[0] + end := params[2] + step := params[1] + if end < start { + increment = -1 + if step > 0 { + return "" + } + } + return intArrayToString(untilStep(start, end+increment, step), " ") + case 2: + start := params[0] + end := params[1] + step := 1 + if end < start { + step = -1 + } + return intArrayToString(untilStep(start, end+step, step), " ") + default: + return "" + } +} + +func intArrayToString(slice []int, delimeter string) string { + return strings.Trim(strings.Join(strings.Fields(fmt.Sprint(slice)), delimeter), "[]") +} + +// performs a float and subsequent decimal.Decimal conversion on inputs, +// and iterates through a and b executing the mathmetical operation f +func execDecimalOp(a interface{}, b []interface{}, f func(d1, d2 decimal.Decimal) decimal.Decimal) float64 { + prt := decimal.NewFromFloat(toFloat64(a)) + for _, x := range b { + dx := decimal.NewFromFloat(toFloat64(x)) + prt = f(prt, dx) + } + rslt, _ := prt.Float64() + return rslt +} diff --git a/vendor/github.com/Masterminds/sprig/v3/reflect.go b/vendor/github.com/Masterminds/sprig/v3/reflect.go new file mode 100644 index 000000000..8a65c132f --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/reflect.go @@ -0,0 +1,28 @@ +package sprig + +import ( + "fmt" + "reflect" +) + +// typeIs returns true if the src is the type named in target. 
+func typeIs(target string, src interface{}) bool { + return target == typeOf(src) +} + +func typeIsLike(target string, src interface{}) bool { + t := typeOf(src) + return target == t || "*"+target == t +} + +func typeOf(src interface{}) string { + return fmt.Sprintf("%T", src) +} + +func kindIs(target string, src interface{}) bool { + return target == kindOf(src) +} + +func kindOf(src interface{}) string { + return reflect.ValueOf(src).Kind().String() +} diff --git a/vendor/github.com/Masterminds/sprig/v3/regex.go b/vendor/github.com/Masterminds/sprig/v3/regex.go new file mode 100644 index 000000000..fab551018 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/regex.go @@ -0,0 +1,83 @@ +package sprig + +import ( + "regexp" +) + +func regexMatch(regex string, s string) bool { + match, _ := regexp.MatchString(regex, s) + return match +} + +func mustRegexMatch(regex string, s string) (bool, error) { + return regexp.MatchString(regex, s) +} + +func regexFindAll(regex string, s string, n int) []string { + r := regexp.MustCompile(regex) + return r.FindAllString(s, n) +} + +func mustRegexFindAll(regex string, s string, n int) ([]string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return []string{}, err + } + return r.FindAllString(s, n), nil +} + +func regexFind(regex string, s string) string { + r := regexp.MustCompile(regex) + return r.FindString(s) +} + +func mustRegexFind(regex string, s string) (string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return "", err + } + return r.FindString(s), nil +} + +func regexReplaceAll(regex string, s string, repl string) string { + r := regexp.MustCompile(regex) + return r.ReplaceAllString(s, repl) +} + +func mustRegexReplaceAll(regex string, s string, repl string) (string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return "", err + } + return r.ReplaceAllString(s, repl), nil +} + +func regexReplaceAllLiteral(regex string, s string, repl string) string { + r := 
regexp.MustCompile(regex) + return r.ReplaceAllLiteralString(s, repl) +} + +func mustRegexReplaceAllLiteral(regex string, s string, repl string) (string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return "", err + } + return r.ReplaceAllLiteralString(s, repl), nil +} + +func regexSplit(regex string, s string, n int) []string { + r := regexp.MustCompile(regex) + return r.Split(s, n) +} + +func mustRegexSplit(regex string, s string, n int) ([]string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return []string{}, err + } + return r.Split(s, n), nil +} + +func regexQuoteMeta(s string) string { + return regexp.QuoteMeta(s) +} diff --git a/vendor/github.com/Masterminds/sprig/v3/semver.go b/vendor/github.com/Masterminds/sprig/v3/semver.go new file mode 100644 index 000000000..3fbe08aa6 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/semver.go @@ -0,0 +1,23 @@ +package sprig + +import ( + sv2 "github.com/Masterminds/semver/v3" +) + +func semverCompare(constraint, version string) (bool, error) { + c, err := sv2.NewConstraint(constraint) + if err != nil { + return false, err + } + + v, err := sv2.NewVersion(version) + if err != nil { + return false, err + } + + return c.Check(v), nil +} + +func semver(version string) (*sv2.Version, error) { + return sv2.NewVersion(version) +} diff --git a/vendor/github.com/Masterminds/sprig/v3/strings.go b/vendor/github.com/Masterminds/sprig/v3/strings.go new file mode 100644 index 000000000..e0ae628c8 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/strings.go @@ -0,0 +1,236 @@ +package sprig + +import ( + "encoding/base32" + "encoding/base64" + "fmt" + "reflect" + "strconv" + "strings" + + util "github.com/Masterminds/goutils" +) + +func base64encode(v string) string { + return base64.StdEncoding.EncodeToString([]byte(v)) +} + +func base64decode(v string) string { + data, err := base64.StdEncoding.DecodeString(v) + if err != nil { + return err.Error() + } + return string(data) +} 
+ +func base32encode(v string) string { + return base32.StdEncoding.EncodeToString([]byte(v)) +} + +func base32decode(v string) string { + data, err := base32.StdEncoding.DecodeString(v) + if err != nil { + return err.Error() + } + return string(data) +} + +func abbrev(width int, s string) string { + if width < 4 { + return s + } + r, _ := util.Abbreviate(s, width) + return r +} + +func abbrevboth(left, right int, s string) string { + if right < 4 || left > 0 && right < 7 { + return s + } + r, _ := util.AbbreviateFull(s, left, right) + return r +} +func initials(s string) string { + // Wrap this just to eliminate the var args, which templates don't do well. + return util.Initials(s) +} + +func randAlphaNumeric(count int) string { + // It is not possible, it appears, to actually generate an error here. + r, _ := util.CryptoRandomAlphaNumeric(count) + return r +} + +func randAlpha(count int) string { + r, _ := util.CryptoRandomAlphabetic(count) + return r +} + +func randAscii(count int) string { + r, _ := util.CryptoRandomAscii(count) + return r +} + +func randNumeric(count int) string { + r, _ := util.CryptoRandomNumeric(count) + return r +} + +func untitle(str string) string { + return util.Uncapitalize(str) +} + +func quote(str ...interface{}) string { + out := make([]string, 0, len(str)) + for _, s := range str { + if s != nil { + out = append(out, fmt.Sprintf("%q", strval(s))) + } + } + return strings.Join(out, " ") +} + +func squote(str ...interface{}) string { + out := make([]string, 0, len(str)) + for _, s := range str { + if s != nil { + out = append(out, fmt.Sprintf("'%v'", s)) + } + } + return strings.Join(out, " ") +} + +func cat(v ...interface{}) string { + v = removeNilElements(v) + r := strings.TrimSpace(strings.Repeat("%v ", len(v))) + return fmt.Sprintf(r, v...) 
+} + +func indent(spaces int, v string) string { + pad := strings.Repeat(" ", spaces) + return pad + strings.Replace(v, "\n", "\n"+pad, -1) +} + +func nindent(spaces int, v string) string { + return "\n" + indent(spaces, v) +} + +func replace(old, new, src string) string { + return strings.Replace(src, old, new, -1) +} + +func plural(one, many string, count int) string { + if count == 1 { + return one + } + return many +} + +func strslice(v interface{}) []string { + switch v := v.(type) { + case []string: + return v + case []interface{}: + b := make([]string, 0, len(v)) + for _, s := range v { + if s != nil { + b = append(b, strval(s)) + } + } + return b + default: + val := reflect.ValueOf(v) + switch val.Kind() { + case reflect.Array, reflect.Slice: + l := val.Len() + b := make([]string, 0, l) + for i := 0; i < l; i++ { + value := val.Index(i).Interface() + if value != nil { + b = append(b, strval(value)) + } + } + return b + default: + if v == nil { + return []string{} + } + + return []string{strval(v)} + } + } +} + +func removeNilElements(v []interface{}) []interface{} { + newSlice := make([]interface{}, 0, len(v)) + for _, i := range v { + if i != nil { + newSlice = append(newSlice, i) + } + } + return newSlice +} + +func strval(v interface{}) string { + switch v := v.(type) { + case string: + return v + case []byte: + return string(v) + case error: + return v.Error() + case fmt.Stringer: + return v.String() + default: + return fmt.Sprintf("%v", v) + } +} + +func trunc(c int, s string) string { + if c < 0 && len(s)+c > 0 { + return s[len(s)+c:] + } + if c >= 0 && len(s) > c { + return s[:c] + } + return s +} + +func join(sep string, v interface{}) string { + return strings.Join(strslice(v), sep) +} + +func split(sep, orig string) map[string]string { + parts := strings.Split(orig, sep) + res := make(map[string]string, len(parts)) + for i, v := range parts { + res["_"+strconv.Itoa(i)] = v + } + return res +} + +func splitn(sep string, n int, orig string) 
map[string]string { + parts := strings.SplitN(orig, sep, n) + res := make(map[string]string, len(parts)) + for i, v := range parts { + res["_"+strconv.Itoa(i)] = v + } + return res +} + +// substring creates a substring of the given string. +// +// If start is < 0, this calls string[:end]. +// +// If start is >= 0 and end < 0 or end bigger than s length, this calls string[start:] +// +// Otherwise, this calls string[start, end]. +func substring(start, end int, s string) string { + if start < 0 { + return s[:end] + } + if end < 0 || end > len(s) { + return s[start:] + } + return s[start:end] +} diff --git a/vendor/github.com/Masterminds/sprig/v3/url.go b/vendor/github.com/Masterminds/sprig/v3/url.go new file mode 100644 index 000000000..b8e120e19 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/url.go @@ -0,0 +1,66 @@ +package sprig + +import ( + "fmt" + "net/url" + "reflect" +) + +func dictGetOrEmpty(dict map[string]interface{}, key string) string { + value, ok := dict[key] + if !ok { + return "" + } + tp := reflect.TypeOf(value).Kind() + if tp != reflect.String { + panic(fmt.Sprintf("unable to parse %s key, must be of type string, but %s found", key, tp.String())) + } + return reflect.ValueOf(value).String() +} + +// parses given URL to return dict object +func urlParse(v string) map[string]interface{} { + dict := map[string]interface{}{} + parsedURL, err := url.Parse(v) + if err != nil { + panic(fmt.Sprintf("unable to parse url: %s", err)) + } + dict["scheme"] = parsedURL.Scheme + dict["host"] = parsedURL.Host + dict["hostname"] = parsedURL.Hostname() + dict["path"] = parsedURL.Path + dict["query"] = parsedURL.RawQuery + dict["opaque"] = parsedURL.Opaque + dict["fragment"] = parsedURL.Fragment + if parsedURL.User != nil { + dict["userinfo"] = parsedURL.User.String() + } else { + dict["userinfo"] = "" + } + + return dict +} + +// join given dict to URL string +func urlJoin(d map[string]interface{}) string { + resURL := url.URL{ + Scheme: 
dictGetOrEmpty(d, "scheme"), + Host: dictGetOrEmpty(d, "host"), + Path: dictGetOrEmpty(d, "path"), + RawQuery: dictGetOrEmpty(d, "query"), + Opaque: dictGetOrEmpty(d, "opaque"), + Fragment: dictGetOrEmpty(d, "fragment"), + } + userinfo := dictGetOrEmpty(d, "userinfo") + var user *url.Userinfo + if userinfo != "" { + tempURL, err := url.Parse(fmt.Sprintf("proto://%s@host", userinfo)) + if err != nil { + panic(fmt.Sprintf("unable to parse userinfo in dict: %s", err)) + } + user = tempURL.User + } + + resURL.User = user + return resURL.String() +} diff --git a/vendor/github.com/armon/go-radix/.gitignore b/vendor/github.com/armon/go-radix/.gitignore new file mode 100644 index 000000000..00268614f --- /dev/null +++ b/vendor/github.com/armon/go-radix/.gitignore @@ -0,0 +1,22 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe diff --git a/vendor/github.com/armon/go-radix/.travis.yml b/vendor/github.com/armon/go-radix/.travis.yml new file mode 100644 index 000000000..1a0bbea6c --- /dev/null +++ b/vendor/github.com/armon/go-radix/.travis.yml @@ -0,0 +1,3 @@ +language: go +go: + - tip diff --git a/vendor/github.com/armon/go-radix/LICENSE b/vendor/github.com/armon/go-radix/LICENSE new file mode 100644 index 000000000..a5df10e67 --- /dev/null +++ b/vendor/github.com/armon/go-radix/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2014 Armon Dadgar + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do 
so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/armon/go-radix/README.md b/vendor/github.com/armon/go-radix/README.md new file mode 100644 index 000000000..26f42a283 --- /dev/null +++ b/vendor/github.com/armon/go-radix/README.md @@ -0,0 +1,38 @@ +go-radix [![Build Status](https://travis-ci.org/armon/go-radix.png)](https://travis-ci.org/armon/go-radix) +========= + +Provides the `radix` package that implements a [radix tree](http://en.wikipedia.org/wiki/Radix_tree). +The package only provides a single `Tree` implementation, optimized for sparse nodes. + +As a radix tree, it provides the following: + * O(k) operations. In many cases, this can be faster than a hash table since + the hash function is an O(k) operation, and hash tables have very poor cache locality. + * Minimum / Maximum value lookups + * Ordered iteration + +For an immutable variant, see [go-immutable-radix](https://github.com/hashicorp/go-immutable-radix). + +Documentation +============= + +The full documentation is available on [Godoc](http://godoc.org/github.com/armon/go-radix). 
+ +Example +======= + +Below is a simple example of usage + +```go +// Create a tree +r := radix.New() +r.Insert("foo", 1) +r.Insert("bar", 2) +r.Insert("foobar", 2) + +// Find the longest prefix match +m, _, _ := r.LongestPrefix("foozip") +if m != "foo" { + panic("should be foo") +} +``` + diff --git a/vendor/github.com/armon/go-radix/radix.go b/vendor/github.com/armon/go-radix/radix.go new file mode 100644 index 000000000..e2bb22eb9 --- /dev/null +++ b/vendor/github.com/armon/go-radix/radix.go @@ -0,0 +1,540 @@ +package radix + +import ( + "sort" + "strings" +) + +// WalkFn is used when walking the tree. Takes a +// key and value, returning if iteration should +// be terminated. +type WalkFn func(s string, v interface{}) bool + +// leafNode is used to represent a value +type leafNode struct { + key string + val interface{} +} + +// edge is used to represent an edge node +type edge struct { + label byte + node *node +} + +type node struct { + // leaf is used to store possible leaf + leaf *leafNode + + // prefix is the common prefix we ignore + prefix string + + // Edges should be stored in-order for iteration. 
+ // We avoid a fully materialized slice to save memory, + // since in most cases we expect to be sparse + edges edges +} + +func (n *node) isLeaf() bool { + return n.leaf != nil +} + +func (n *node) addEdge(e edge) { + n.edges = append(n.edges, e) + n.edges.Sort() +} + +func (n *node) updateEdge(label byte, node *node) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= label + }) + if idx < num && n.edges[idx].label == label { + n.edges[idx].node = node + return + } + panic("replacing missing edge") +} + +func (n *node) getEdge(label byte) *node { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= label + }) + if idx < num && n.edges[idx].label == label { + return n.edges[idx].node + } + return nil +} + +func (n *node) delEdge(label byte) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= label + }) + if idx < num && n.edges[idx].label == label { + copy(n.edges[idx:], n.edges[idx+1:]) + n.edges[len(n.edges)-1] = edge{} + n.edges = n.edges[:len(n.edges)-1] + } +} + +type edges []edge + +func (e edges) Len() int { + return len(e) +} + +func (e edges) Less(i, j int) bool { + return e[i].label < e[j].label +} + +func (e edges) Swap(i, j int) { + e[i], e[j] = e[j], e[i] +} + +func (e edges) Sort() { + sort.Sort(e) +} + +// Tree implements a radix tree. This can be treated as a +// Dictionary abstract data type. 
The main advantage over +// a standard hash map is prefix-based lookups and +// ordered iteration, +type Tree struct { + root *node + size int +} + +// New returns an empty Tree +func New() *Tree { + return NewFromMap(nil) +} + +// NewFromMap returns a new tree containing the keys +// from an existing map +func NewFromMap(m map[string]interface{}) *Tree { + t := &Tree{root: &node{}} + for k, v := range m { + t.Insert(k, v) + } + return t +} + +// Len is used to return the number of elements in the tree +func (t *Tree) Len() int { + return t.size +} + +// longestPrefix finds the length of the shared prefix +// of two strings +func longestPrefix(k1, k2 string) int { + max := len(k1) + if l := len(k2); l < max { + max = l + } + var i int + for i = 0; i < max; i++ { + if k1[i] != k2[i] { + break + } + } + return i +} + +// Insert is used to add a newentry or update +// an existing entry. Returns if updated. +func (t *Tree) Insert(s string, v interface{}) (interface{}, bool) { + var parent *node + n := t.root + search := s + for { + // Handle key exhaution + if len(search) == 0 { + if n.isLeaf() { + old := n.leaf.val + n.leaf.val = v + return old, true + } + + n.leaf = &leafNode{ + key: s, + val: v, + } + t.size++ + return nil, false + } + + // Look for the edge + parent = n + n = n.getEdge(search[0]) + + // No edge, create one + if n == nil { + e := edge{ + label: search[0], + node: &node{ + leaf: &leafNode{ + key: s, + val: v, + }, + prefix: search, + }, + } + parent.addEdge(e) + t.size++ + return nil, false + } + + // Determine longest prefix of the search key on match + commonPrefix := longestPrefix(search, n.prefix) + if commonPrefix == len(n.prefix) { + search = search[commonPrefix:] + continue + } + + // Split the node + t.size++ + child := &node{ + prefix: search[:commonPrefix], + } + parent.updateEdge(search[0], child) + + // Restore the existing node + child.addEdge(edge{ + label: n.prefix[commonPrefix], + node: n, + }) + n.prefix = n.prefix[commonPrefix:] + + 
// Create a new leaf node + leaf := &leafNode{ + key: s, + val: v, + } + + // If the new key is a subset, add to to this node + search = search[commonPrefix:] + if len(search) == 0 { + child.leaf = leaf + return nil, false + } + + // Create a new edge for the node + child.addEdge(edge{ + label: search[0], + node: &node{ + leaf: leaf, + prefix: search, + }, + }) + return nil, false + } +} + +// Delete is used to delete a key, returning the previous +// value and if it was deleted +func (t *Tree) Delete(s string) (interface{}, bool) { + var parent *node + var label byte + n := t.root + search := s + for { + // Check for key exhaution + if len(search) == 0 { + if !n.isLeaf() { + break + } + goto DELETE + } + + // Look for an edge + parent = n + label = search[0] + n = n.getEdge(label) + if n == nil { + break + } + + // Consume the search prefix + if strings.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + } else { + break + } + } + return nil, false + +DELETE: + // Delete the leaf + leaf := n.leaf + n.leaf = nil + t.size-- + + // Check if we should delete this node from the parent + if parent != nil && len(n.edges) == 0 { + parent.delEdge(label) + } + + // Check if we should merge this node + if n != t.root && len(n.edges) == 1 { + n.mergeChild() + } + + // Check if we should merge the parent's other child + if parent != nil && parent != t.root && len(parent.edges) == 1 && !parent.isLeaf() { + parent.mergeChild() + } + + return leaf.val, true +} + +// DeletePrefix is used to delete the subtree under a prefix +// Returns how many nodes were deleted +// Use this to delete large subtrees efficiently +func (t *Tree) DeletePrefix(s string) int { + return t.deletePrefix(nil, t.root, s) +} + +// delete does a recursive deletion +func (t *Tree) deletePrefix(parent, n *node, prefix string) int { + // Check for key exhaustion + if len(prefix) == 0 { + // Remove the leaf node + subTreeSize := 0 + //recursively walk from all edges of the node to be deleted + 
recursiveWalk(n, func(s string, v interface{}) bool { + subTreeSize++ + return false + }) + if n.isLeaf() { + n.leaf = nil + } + n.edges = nil // deletes the entire subtree + + // Check if we should merge the parent's other child + if parent != nil && parent != t.root && len(parent.edges) == 1 && !parent.isLeaf() { + parent.mergeChild() + } + t.size -= subTreeSize + return subTreeSize + } + + // Look for an edge + label := prefix[0] + child := n.getEdge(label) + if child == nil || (!strings.HasPrefix(child.prefix, prefix) && !strings.HasPrefix(prefix, child.prefix)) { + return 0 + } + + // Consume the search prefix + if len(child.prefix) > len(prefix) { + prefix = prefix[len(prefix):] + } else { + prefix = prefix[len(child.prefix):] + } + return t.deletePrefix(n, child, prefix) +} + +func (n *node) mergeChild() { + e := n.edges[0] + child := e.node + n.prefix = n.prefix + child.prefix + n.leaf = child.leaf + n.edges = child.edges +} + +// Get is used to lookup a specific key, returning +// the value and if it was found +func (t *Tree) Get(s string) (interface{}, bool) { + n := t.root + search := s + for { + // Check for key exhaution + if len(search) == 0 { + if n.isLeaf() { + return n.leaf.val, true + } + break + } + + // Look for an edge + n = n.getEdge(search[0]) + if n == nil { + break + } + + // Consume the search prefix + if strings.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + } else { + break + } + } + return nil, false +} + +// LongestPrefix is like Get, but instead of an +// exact match, it will return the longest prefix match. 
+func (t *Tree) LongestPrefix(s string) (string, interface{}, bool) { + var last *leafNode + n := t.root + search := s + for { + // Look for a leaf node + if n.isLeaf() { + last = n.leaf + } + + // Check for key exhaution + if len(search) == 0 { + break + } + + // Look for an edge + n = n.getEdge(search[0]) + if n == nil { + break + } + + // Consume the search prefix + if strings.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + } else { + break + } + } + if last != nil { + return last.key, last.val, true + } + return "", nil, false +} + +// Minimum is used to return the minimum value in the tree +func (t *Tree) Minimum() (string, interface{}, bool) { + n := t.root + for { + if n.isLeaf() { + return n.leaf.key, n.leaf.val, true + } + if len(n.edges) > 0 { + n = n.edges[0].node + } else { + break + } + } + return "", nil, false +} + +// Maximum is used to return the maximum value in the tree +func (t *Tree) Maximum() (string, interface{}, bool) { + n := t.root + for { + if num := len(n.edges); num > 0 { + n = n.edges[num-1].node + continue + } + if n.isLeaf() { + return n.leaf.key, n.leaf.val, true + } + break + } + return "", nil, false +} + +// Walk is used to walk the tree +func (t *Tree) Walk(fn WalkFn) { + recursiveWalk(t.root, fn) +} + +// WalkPrefix is used to walk the tree under a prefix +func (t *Tree) WalkPrefix(prefix string, fn WalkFn) { + n := t.root + search := prefix + for { + // Check for key exhaution + if len(search) == 0 { + recursiveWalk(n, fn) + return + } + + // Look for an edge + n = n.getEdge(search[0]) + if n == nil { + break + } + + // Consume the search prefix + if strings.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + + } else if strings.HasPrefix(n.prefix, search) { + // Child may be under our search prefix + recursiveWalk(n, fn) + return + } else { + break + } + } + +} + +// WalkPath is used to walk the tree, but only visiting nodes +// from the root down to a given leaf. 
Where WalkPrefix walks +// all the entries *under* the given prefix, this walks the +// entries *above* the given prefix. +func (t *Tree) WalkPath(path string, fn WalkFn) { + n := t.root + search := path + for { + // Visit the leaf values if any + if n.leaf != nil && fn(n.leaf.key, n.leaf.val) { + return + } + + // Check for key exhaution + if len(search) == 0 { + return + } + + // Look for an edge + n = n.getEdge(search[0]) + if n == nil { + return + } + + // Consume the search prefix + if strings.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + } else { + break + } + } +} + +// recursiveWalk is used to do a pre-order walk of a node +// recursively. Returns true if the walk should be aborted +func recursiveWalk(n *node, fn WalkFn) bool { + // Visit the leaf values if any + if n.leaf != nil && fn(n.leaf.key, n.leaf.val) { + return true + } + + // Recurse on the children + for _, e := range n.edges { + if recursiveWalk(e.node, fn) { + return true + } + } + return false +} + +// ToMap is used to walk the tree and convert it into a map +func (t *Tree) ToMap() map[string]interface{} { + out := make(map[string]interface{}, t.size) + t.Walk(func(k string, v interface{}) bool { + out[k] = v + return false + }) + return out +} diff --git a/vendor/github.com/bgentry/speakeasy/.gitignore b/vendor/github.com/bgentry/speakeasy/.gitignore new file mode 100644 index 000000000..9e1311461 --- /dev/null +++ b/vendor/github.com/bgentry/speakeasy/.gitignore @@ -0,0 +1,2 @@ +example/example +example/example.exe diff --git a/vendor/github.com/bgentry/speakeasy/LICENSE b/vendor/github.com/bgentry/speakeasy/LICENSE new file mode 100644 index 000000000..37d60fc35 --- /dev/null +++ b/vendor/github.com/bgentry/speakeasy/LICENSE @@ -0,0 +1,24 @@ +MIT License + +Copyright (c) 2017 Blake Gentry + +This license applies to the non-Windows portions of this library. The Windows +portion maintains its own Apache 2.0 license. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/bgentry/speakeasy/LICENSE_WINDOWS b/vendor/github.com/bgentry/speakeasy/LICENSE_WINDOWS new file mode 100644 index 000000000..ff177f612 --- /dev/null +++ b/vendor/github.com/bgentry/speakeasy/LICENSE_WINDOWS @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ +Copyright [2013] [the CloudFoundry Authors] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/github.com/bgentry/speakeasy/Readme.md b/vendor/github.com/bgentry/speakeasy/Readme.md new file mode 100644 index 000000000..fceda7518 --- /dev/null +++ b/vendor/github.com/bgentry/speakeasy/Readme.md @@ -0,0 +1,30 @@ +# Speakeasy + +This package provides cross-platform Go (#golang) helpers for taking user input +from the terminal while not echoing the input back (similar to `getpasswd`). The +package uses syscalls to avoid any dependence on cgo, and is therefore +compatible with cross-compiling. + +[![GoDoc](https://godoc.org/github.com/bgentry/speakeasy?status.png)][godoc] + +## Unicode + +Multi-byte unicode characters work successfully on Mac OS X. On Windows, +however, this may be problematic (as is UTF in general on Windows). Other +platforms have not been tested. + +## License + +The code herein was not written by me, but was compiled from two separate open +source packages. Unix portions were imported from [gopass][gopass], while +Windows portions were imported from the [CloudFoundry Go CLI][cf-cli]'s +[Windows terminal helpers][cf-ui-windows]. + +The [license for the windows portion](./LICENSE_WINDOWS) has been copied exactly +from the source (though I attempted to fill in the correct owner in the +boilerplate copyright notice). 
+ +[cf-cli]: https://github.com/cloudfoundry/cli "CloudFoundry Go CLI" +[cf-ui-windows]: https://github.com/cloudfoundry/cli/blob/master/src/cf/terminal/ui_windows.go "CloudFoundry Go CLI Windows input helpers" +[godoc]: https://godoc.org/github.com/bgentry/speakeasy "speakeasy on Godoc.org" +[gopass]: https://code.google.com/p/gopass "gopass" diff --git a/vendor/github.com/bgentry/speakeasy/speakeasy.go b/vendor/github.com/bgentry/speakeasy/speakeasy.go new file mode 100644 index 000000000..71c1dd1b9 --- /dev/null +++ b/vendor/github.com/bgentry/speakeasy/speakeasy.go @@ -0,0 +1,49 @@ +package speakeasy + +import ( + "fmt" + "io" + "os" + "strings" +) + +// Ask the user to enter a password with input hidden. prompt is a string to +// display before the user's input. Returns the provided password, or an error +// if the command failed. +func Ask(prompt string) (password string, err error) { + return FAsk(os.Stdout, prompt) +} + +// FAsk is the same as Ask, except it is possible to specify the file to write +// the prompt to. If 'nil' is passed as the writer, no prompt will be written. +func FAsk(wr io.Writer, prompt string) (password string, err error) { + if wr != nil && prompt != "" { + fmt.Fprint(wr, prompt) // Display the prompt. + } + password, err = getPassword() + + // Carriage return after the user input. 
+ if wr != nil { + fmt.Fprintln(wr, "") + } + return +} + +func readline() (value string, err error) { + var valb []byte + var n int + b := make([]byte, 1) + for { + // read one byte at a time so we don't accidentally read extra bytes + n, err = os.Stdin.Read(b) + if err != nil && err != io.EOF { + return "", err + } + if n == 0 || b[0] == '\n' { + break + } + valb = append(valb, b[0]) + } + + return strings.TrimSuffix(string(valb), "\r"), nil +} diff --git a/vendor/github.com/bgentry/speakeasy/speakeasy_unix.go b/vendor/github.com/bgentry/speakeasy/speakeasy_unix.go new file mode 100644 index 000000000..d99fda191 --- /dev/null +++ b/vendor/github.com/bgentry/speakeasy/speakeasy_unix.go @@ -0,0 +1,93 @@ +// based on https://code.google.com/p/gopass +// Author: johnsiilver@gmail.com (John Doak) +// +// Original code is based on code by RogerV in the golang-nuts thread: +// https://groups.google.com/group/golang-nuts/browse_thread/thread/40cc41e9d9fc9247 + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package speakeasy + +import ( + "fmt" + "os" + "os/signal" + "strings" + "syscall" +) + +const sttyArg0 = "/bin/stty" + +var ( + sttyArgvEOff = []string{"stty", "-echo"} + sttyArgvEOn = []string{"stty", "echo"} +) + +// getPassword gets input hidden from the terminal from a user. This is +// accomplished by turning off terminal echo, reading input from the user and +// finally turning on terminal echo. +func getPassword() (password string, err error) { + sig := make(chan os.Signal, 10) + brk := make(chan bool) + + // File descriptors for stdin, stdout, and stderr. + fd := []uintptr{os.Stdin.Fd(), os.Stdout.Fd(), os.Stderr.Fd()} + + // Setup notifications of termination signals to channel sig, create a process to + // watch for these signals so we can turn back on echo if need be. + signal.Notify(sig, syscall.SIGHUP, syscall.SIGINT, syscall.SIGKILL, syscall.SIGQUIT, + syscall.SIGTERM) + go catchSignal(fd, sig, brk) + + // Turn off the terminal echo. 
+ pid, err := echoOff(fd) + if err != nil { + return "", err + } + + // Turn on the terminal echo and stop listening for signals. + defer signal.Stop(sig) + defer close(brk) + defer echoOn(fd) + + syscall.Wait4(pid, nil, 0, nil) + + line, err := readline() + if err == nil { + password = strings.TrimSpace(line) + } else { + err = fmt.Errorf("failed during password entry: %s", err) + } + + return password, err +} + +// echoOff turns off the terminal echo. +func echoOff(fd []uintptr) (int, error) { + pid, err := syscall.ForkExec(sttyArg0, sttyArgvEOff, &syscall.ProcAttr{Dir: "", Files: fd}) + if err != nil { + return 0, fmt.Errorf("failed turning off console echo for password entry:\n\t%s", err) + } + return pid, nil +} + +// echoOn turns back on the terminal echo. +func echoOn(fd []uintptr) { + // Turn on the terminal echo. + pid, e := syscall.ForkExec(sttyArg0, sttyArgvEOn, &syscall.ProcAttr{Dir: "", Files: fd}) + if e == nil { + syscall.Wait4(pid, nil, 0, nil) + } +} + +// catchSignal tries to catch SIGKILL, SIGQUIT and SIGINT so that we can turn +// terminal echo back on before the program ends. Otherwise the user is left +// with echo off on their terminal. 
+func catchSignal(fd []uintptr, sig chan os.Signal, brk chan bool) { + select { + case <-sig: + echoOn(fd) + os.Exit(-1) + case <-brk: + } +} diff --git a/vendor/github.com/bgentry/speakeasy/speakeasy_windows.go b/vendor/github.com/bgentry/speakeasy/speakeasy_windows.go new file mode 100644 index 000000000..c2093a809 --- /dev/null +++ b/vendor/github.com/bgentry/speakeasy/speakeasy_windows.go @@ -0,0 +1,41 @@ +// +build windows + +package speakeasy + +import ( + "syscall" +) + +// SetConsoleMode function can be used to change value of ENABLE_ECHO_INPUT: +// http://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx +const ENABLE_ECHO_INPUT = 0x0004 + +func getPassword() (password string, err error) { + var oldMode uint32 + + err = syscall.GetConsoleMode(syscall.Stdin, &oldMode) + if err != nil { + return + } + + var newMode uint32 = (oldMode &^ ENABLE_ECHO_INPUT) + + err = setConsoleMode(syscall.Stdin, newMode) + defer setConsoleMode(syscall.Stdin, oldMode) + if err != nil { + return + } + + return readline() +} + +func setConsoleMode(console syscall.Handle, mode uint32) (err error) { + dll := syscall.MustLoadDLL("kernel32") + proc := dll.MustFindProc("SetConsoleMode") + r, _, err := proc.Call(uintptr(console), uintptr(mode)) + + if r == 0 { + return err + } + return nil +} diff --git a/vendor/github.com/google/uuid/.travis.yml b/vendor/github.com/google/uuid/.travis.yml new file mode 100644 index 000000000..d8156a60b --- /dev/null +++ b/vendor/github.com/google/uuid/.travis.yml @@ -0,0 +1,9 @@ +language: go + +go: + - 1.4.3 + - 1.5.3 + - tip + +script: + - go test -v ./... diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md new file mode 100644 index 000000000..04fdf09f1 --- /dev/null +++ b/vendor/github.com/google/uuid/CONTRIBUTING.md @@ -0,0 +1,10 @@ +# How to contribute + +We definitely welcome patches and contribution to this project! 
+ +### Legal requirements + +In order to protect both you and ourselves, you will need to sign the +[Contributor License Agreement](https://cla.developers.google.com/clas). + +You may have already signed it for other Google projects. diff --git a/vendor/github.com/google/uuid/CONTRIBUTORS b/vendor/github.com/google/uuid/CONTRIBUTORS new file mode 100644 index 000000000..b4bb97f6b --- /dev/null +++ b/vendor/github.com/google/uuid/CONTRIBUTORS @@ -0,0 +1,9 @@ +Paul Borman +bmatsuo +shawnps +theory +jboverfelt +dsymonds +cd1 +wallclockbuilder +dansouza diff --git a/vendor/github.com/google/uuid/LICENSE b/vendor/github.com/google/uuid/LICENSE new file mode 100644 index 000000000..5dc68268d --- /dev/null +++ b/vendor/github.com/google/uuid/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009,2014 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md new file mode 100644 index 000000000..f765a46f9 --- /dev/null +++ b/vendor/github.com/google/uuid/README.md @@ -0,0 +1,19 @@ +# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master) +The uuid package generates and inspects UUIDs based on +[RFC 4122](http://tools.ietf.org/html/rfc4122) +and DCE 1.1: Authentication and Security Services. + +This package is based on the github.com/pborman/uuid package (previously named +code.google.com/p/go-uuid). It differs from these earlier packages in that +a UUID is a 16 byte array rather than a byte slice. One loss due to this +change is the ability to represent an invalid UUID (vs a NIL UUID). + +###### Install +`go get github.com/google/uuid` + +###### Documentation +[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid) + +Full `go doc` style documentation for the package can be viewed online without +installing this package by using the GoDoc site here: +http://pkg.go.dev/github.com/google/uuid diff --git a/vendor/github.com/google/uuid/dce.go b/vendor/github.com/google/uuid/dce.go new file mode 100644 index 000000000..fa820b9d3 --- /dev/null +++ b/vendor/github.com/google/uuid/dce.go @@ -0,0 +1,80 @@ +// Copyright 2016 Google Inc. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" + "fmt" + "os" +) + +// A Domain represents a Version 2 domain +type Domain byte + +// Domain constants for DCE Security (Version 2) UUIDs. +const ( + Person = Domain(0) + Group = Domain(1) + Org = Domain(2) +) + +// NewDCESecurity returns a DCE Security (Version 2) UUID. +// +// The domain should be one of Person, Group or Org. +// On a POSIX system the id should be the users UID for the Person +// domain and the users GID for the Group. The meaning of id for +// the domain Org or on non-POSIX systems is site defined. +// +// For a given domain/id pair the same token may be returned for up to +// 7 minutes and 10 seconds. +func NewDCESecurity(domain Domain, id uint32) (UUID, error) { + uuid, err := NewUUID() + if err == nil { + uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2 + uuid[9] = byte(domain) + binary.BigEndian.PutUint32(uuid[0:], id) + } + return uuid, err +} + +// NewDCEPerson returns a DCE Security (Version 2) UUID in the person +// domain with the id returned by os.Getuid. +// +// NewDCESecurity(Person, uint32(os.Getuid())) +func NewDCEPerson() (UUID, error) { + return NewDCESecurity(Person, uint32(os.Getuid())) +} + +// NewDCEGroup returns a DCE Security (Version 2) UUID in the group +// domain with the id returned by os.Getgid. +// +// NewDCESecurity(Group, uint32(os.Getgid())) +func NewDCEGroup() (UUID, error) { + return NewDCESecurity(Group, uint32(os.Getgid())) +} + +// Domain returns the domain for a Version 2 UUID. Domains are only defined +// for Version 2 UUIDs. +func (uuid UUID) Domain() Domain { + return Domain(uuid[9]) +} + +// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2 +// UUIDs. 
+func (uuid UUID) ID() uint32 { + return binary.BigEndian.Uint32(uuid[0:4]) +} + +func (d Domain) String() string { + switch d { + case Person: + return "Person" + case Group: + return "Group" + case Org: + return "Org" + } + return fmt.Sprintf("Domain%d", int(d)) +} diff --git a/vendor/github.com/google/uuid/doc.go b/vendor/github.com/google/uuid/doc.go new file mode 100644 index 000000000..5b8a4b9af --- /dev/null +++ b/vendor/github.com/google/uuid/doc.go @@ -0,0 +1,12 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package uuid generates and inspects UUIDs. +// +// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security +// Services. +// +// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to +// maps or compared directly. +package uuid diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go new file mode 100644 index 000000000..b404f4bec --- /dev/null +++ b/vendor/github.com/google/uuid/hash.go @@ -0,0 +1,53 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "crypto/md5" + "crypto/sha1" + "hash" +) + +// Well known namespace IDs and UUIDs +var ( + NameSpaceDNS = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceURL = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) + Nil UUID // empty UUID, all zeros +) + +// NewHash returns a new UUID derived from the hash of space concatenated with +// data generated by h. The hash should be at least 16 byte in length. The +// first 16 bytes of the hash are used to form the UUID. The version of the +// UUID will be the lower 4 bits of version. 
NewHash is used to implement +// NewMD5 and NewSHA1. +func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { + h.Reset() + h.Write(space[:]) //nolint:errcheck + h.Write(data) //nolint:errcheck + s := h.Sum(nil) + var uuid UUID + copy(uuid[:], s) + uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4) + uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant + return uuid +} + +// NewMD5 returns a new MD5 (Version 3) UUID based on the +// supplied name space and data. It is the same as calling: +// +// NewHash(md5.New(), space, data, 3) +func NewMD5(space UUID, data []byte) UUID { + return NewHash(md5.New(), space, data, 3) +} + +// NewSHA1 returns a new SHA1 (Version 5) UUID based on the +// supplied name space and data. It is the same as calling: +// +// NewHash(sha1.New(), space, data, 5) +func NewSHA1(space UUID, data []byte) UUID { + return NewHash(sha1.New(), space, data, 5) +} diff --git a/vendor/github.com/google/uuid/marshal.go b/vendor/github.com/google/uuid/marshal.go new file mode 100644 index 000000000..14bd34072 --- /dev/null +++ b/vendor/github.com/google/uuid/marshal.go @@ -0,0 +1,38 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import "fmt" + +// MarshalText implements encoding.TextMarshaler. +func (uuid UUID) MarshalText() ([]byte, error) { + var js [36]byte + encodeHex(js[:], uuid) + return js[:], nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (uuid *UUID) UnmarshalText(data []byte) error { + id, err := ParseBytes(data) + if err != nil { + return err + } + *uuid = id + return nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (uuid UUID) MarshalBinary() ([]byte, error) { + return uuid[:], nil +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. 
+func (uuid *UUID) UnmarshalBinary(data []byte) error { + if len(data) != 16 { + return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) + } + copy(uuid[:], data) + return nil +} diff --git a/vendor/github.com/google/uuid/node.go b/vendor/github.com/google/uuid/node.go new file mode 100644 index 000000000..d651a2b06 --- /dev/null +++ b/vendor/github.com/google/uuid/node.go @@ -0,0 +1,90 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "sync" +) + +var ( + nodeMu sync.Mutex + ifname string // name of interface being used + nodeID [6]byte // hardware for version 1 UUIDs + zeroID [6]byte // nodeID with only 0's +) + +// NodeInterface returns the name of the interface from which the NodeID was +// derived. The interface "user" is returned if the NodeID was set by +// SetNodeID. +func NodeInterface() string { + defer nodeMu.Unlock() + nodeMu.Lock() + return ifname +} + +// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs. +// If name is "" then the first usable interface found will be used or a random +// Node ID will be generated. If a named interface cannot be found then false +// is returned. +// +// SetNodeInterface never fails when name is "". +func SetNodeInterface(name string) bool { + defer nodeMu.Unlock() + nodeMu.Lock() + return setNodeInterface(name) +} + +func setNodeInterface(name string) bool { + iname, addr := getHardwareInterface(name) // null implementation for js + if iname != "" && addr != nil { + ifname = iname + copy(nodeID[:], addr) + return true + } + + // We found no interfaces with a valid hardware address. 
If name + // does not specify a specific interface generate a random Node ID + // (section 4.1.6) + if name == "" { + ifname = "random" + randomBits(nodeID[:]) + return true + } + return false +} + +// NodeID returns a slice of a copy of the current Node ID, setting the Node ID +// if not already set. +func NodeID() []byte { + defer nodeMu.Unlock() + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } + nid := nodeID + return nid[:] +} + +// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes +// of id are used. If id is less than 6 bytes then false is returned and the +// Node ID is not set. +func SetNodeID(id []byte) bool { + if len(id) < 6 { + return false + } + defer nodeMu.Unlock() + nodeMu.Lock() + copy(nodeID[:], id) + ifname = "user" + return true +} + +// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is +// not valid. The NodeID is only well defined for version 1 and 2 UUIDs. +func (uuid UUID) NodeID() []byte { + var node [6]byte + copy(node[:], uuid[10:]) + return node[:] +} diff --git a/vendor/github.com/google/uuid/node_js.go b/vendor/github.com/google/uuid/node_js.go new file mode 100644 index 000000000..24b78edc9 --- /dev/null +++ b/vendor/github.com/google/uuid/node_js.go @@ -0,0 +1,12 @@ +// Copyright 2017 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build js + +package uuid + +// getHardwareInterface returns nil values for the JS version of the code. +// This remvoves the "net" dependency, because it is not used in the browser. +// Using the "net" library inflates the size of the transpiled JS code by 673k bytes. 
+func getHardwareInterface(name string) (string, []byte) { return "", nil } diff --git a/vendor/github.com/google/uuid/node_net.go b/vendor/github.com/google/uuid/node_net.go new file mode 100644 index 000000000..0cbbcddbd --- /dev/null +++ b/vendor/github.com/google/uuid/node_net.go @@ -0,0 +1,33 @@ +// Copyright 2017 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !js + +package uuid + +import "net" + +var interfaces []net.Interface // cached list of interfaces + +// getHardwareInterface returns the name and hardware address of interface name. +// If name is "" then the name and hardware address of one of the system's +// interfaces is returned. If no interfaces are found (name does not exist or +// there are no interfaces) then "", nil is returned. +// +// Only addresses of at least 6 bytes are returned. +func getHardwareInterface(name string) (string, []byte) { + if interfaces == nil { + var err error + interfaces, err = net.Interfaces() + if err != nil { + return "", nil + } + } + for _, ifs := range interfaces { + if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) { + return ifs.Name, ifs.HardwareAddr + } + } + return "", nil +} diff --git a/vendor/github.com/google/uuid/null.go b/vendor/github.com/google/uuid/null.go new file mode 100644 index 000000000..d7fcbf286 --- /dev/null +++ b/vendor/github.com/google/uuid/null.go @@ -0,0 +1,118 @@ +// Copyright 2021 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "bytes" + "database/sql/driver" + "encoding/json" + "fmt" +) + +var jsonNull = []byte("null") + +// NullUUID represents a UUID that may be null. 
+// NullUUID implements the SQL driver.Scanner interface so +// it can be used as a scan destination: +// +// var u uuid.NullUUID +// err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&u) +// ... +// if u.Valid { +// // use u.UUID +// } else { +// // NULL value +// } +// +type NullUUID struct { + UUID UUID + Valid bool // Valid is true if UUID is not NULL +} + +// Scan implements the SQL driver.Scanner interface. +func (nu *NullUUID) Scan(value interface{}) error { + if value == nil { + nu.UUID, nu.Valid = Nil, false + return nil + } + + err := nu.UUID.Scan(value) + if err != nil { + nu.Valid = false + return err + } + + nu.Valid = true + return nil +} + +// Value implements the driver Valuer interface. +func (nu NullUUID) Value() (driver.Value, error) { + if !nu.Valid { + return nil, nil + } + // Delegate to UUID Value function + return nu.UUID.Value() +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (nu NullUUID) MarshalBinary() ([]byte, error) { + if nu.Valid { + return nu.UUID[:], nil + } + + return []byte(nil), nil +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (nu *NullUUID) UnmarshalBinary(data []byte) error { + if len(data) != 16 { + return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) + } + copy(nu.UUID[:], data) + nu.Valid = true + return nil +} + +// MarshalText implements encoding.TextMarshaler. +func (nu NullUUID) MarshalText() ([]byte, error) { + if nu.Valid { + return nu.UUID.MarshalText() + } + + return jsonNull, nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (nu *NullUUID) UnmarshalText(data []byte) error { + id, err := ParseBytes(data) + if err != nil { + nu.Valid = false + return err + } + nu.UUID = id + nu.Valid = true + return nil +} + +// MarshalJSON implements json.Marshaler. +func (nu NullUUID) MarshalJSON() ([]byte, error) { + if nu.Valid { + return json.Marshal(nu.UUID) + } + + return jsonNull, nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (nu *NullUUID) UnmarshalJSON(data []byte) error { + if bytes.Equal(data, jsonNull) { + *nu = NullUUID{} + return nil // valid null UUID + } + err := json.Unmarshal(data, &nu.UUID) + nu.Valid = err == nil + return err +} diff --git a/vendor/github.com/google/uuid/sql.go b/vendor/github.com/google/uuid/sql.go new file mode 100644 index 000000000..2e02ec06c --- /dev/null +++ b/vendor/github.com/google/uuid/sql.go @@ -0,0 +1,59 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "database/sql/driver" + "fmt" +) + +// Scan implements sql.Scanner so UUIDs can be read from databases transparently. +// Currently, database types that map to string and []byte are supported. Please +// consult database-specific driver documentation for matching types. +func (uuid *UUID) Scan(src interface{}) error { + switch src := src.(type) { + case nil: + return nil + + case string: + // if an empty UUID comes from a table, we return a null UUID + if src == "" { + return nil + } + + // see Parse for required string format + u, err := Parse(src) + if err != nil { + return fmt.Errorf("Scan: %v", err) + } + + *uuid = u + + case []byte: + // if an empty UUID comes from a table, we return a null UUID + if len(src) == 0 { + return nil + } + + // assumes a simple slice of bytes if 16 bytes + // otherwise attempts to parse + if len(src) != 16 { + return uuid.Scan(string(src)) + } + copy((*uuid)[:], src) + + default: + return fmt.Errorf("Scan: unable to scan type %T into UUID", src) + } + + return nil +} + +// Value implements sql.Valuer so that UUIDs can be written to databases +// transparently. Currently, UUIDs map to strings. Please consult +// database-specific driver documentation for matching types. 
+func (uuid UUID) Value() (driver.Value, error) { + return uuid.String(), nil +} diff --git a/vendor/github.com/google/uuid/time.go b/vendor/github.com/google/uuid/time.go new file mode 100644 index 000000000..e6ef06cdc --- /dev/null +++ b/vendor/github.com/google/uuid/time.go @@ -0,0 +1,123 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" + "sync" + "time" +) + +// A Time represents a time as the number of 100's of nanoseconds since 15 Oct +// 1582. +type Time int64 + +const ( + lillian = 2299160 // Julian day of 15 Oct 1582 + unix = 2440587 // Julian day of 1 Jan 1970 + epoch = unix - lillian // Days between epochs + g1582 = epoch * 86400 // seconds between epochs + g1582ns100 = g1582 * 10000000 // 100s of a nanoseconds between epochs +) + +var ( + timeMu sync.Mutex + lasttime uint64 // last time we returned + clockSeq uint16 // clock sequence for this run + + timeNow = time.Now // for testing +) + +// UnixTime converts t the number of seconds and nanoseconds using the Unix +// epoch of 1 Jan 1970. +func (t Time) UnixTime() (sec, nsec int64) { + sec = int64(t - g1582ns100) + nsec = (sec % 10000000) * 100 + sec /= 10000000 + return sec, nsec +} + +// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and +// clock sequence as well as adjusting the clock sequence as needed. An error +// is returned if the current time cannot be determined. +func GetTime() (Time, uint16, error) { + defer timeMu.Unlock() + timeMu.Lock() + return getTime() +} + +func getTime() (Time, uint16, error) { + t := timeNow() + + // If we don't have a clock sequence already, set one. 
+ if clockSeq == 0 { + setClockSequence(-1) + } + now := uint64(t.UnixNano()/100) + g1582ns100 + + // If time has gone backwards with this clock sequence then we + // increment the clock sequence + if now <= lasttime { + clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000 + } + lasttime = now + return Time(now), clockSeq, nil +} + +// ClockSequence returns the current clock sequence, generating one if not +// already set. The clock sequence is only used for Version 1 UUIDs. +// +// The uuid package does not use global static storage for the clock sequence or +// the last time a UUID was generated. Unless SetClockSequence is used, a new +// random clock sequence is generated the first time a clock sequence is +// requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1) +func ClockSequence() int { + defer timeMu.Unlock() + timeMu.Lock() + return clockSequence() +} + +func clockSequence() int { + if clockSeq == 0 { + setClockSequence(-1) + } + return int(clockSeq & 0x3fff) +} + +// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to +// -1 causes a new sequence to be generated. +func SetClockSequence(seq int) { + defer timeMu.Unlock() + timeMu.Lock() + setClockSequence(seq) +} + +func setClockSequence(seq int) { + if seq == -1 { + var b [2]byte + randomBits(b[:]) // clock sequence + seq = int(b[0])<<8 | int(b[1]) + } + oldSeq := clockSeq + clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant + if oldSeq != clockSeq { + lasttime = 0 + } +} + +// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in +// uuid. The time is only defined for version 1 and 2 UUIDs. +func (uuid UUID) Time() Time { + time := int64(binary.BigEndian.Uint32(uuid[0:4])) + time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 + time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 + return Time(time) +} + +// ClockSequence returns the clock sequence encoded in uuid. 
+// The clock sequence is only well defined for version 1 and 2 UUIDs. +func (uuid UUID) ClockSequence() int { + return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff +} diff --git a/vendor/github.com/google/uuid/util.go b/vendor/github.com/google/uuid/util.go new file mode 100644 index 000000000..5ea6c7378 --- /dev/null +++ b/vendor/github.com/google/uuid/util.go @@ -0,0 +1,43 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "io" +) + +// randomBits completely fills slice b with random data. +func randomBits(b []byte) { + if _, err := io.ReadFull(rander, b); err != nil { + panic(err.Error()) // rand should never fail + } +} + +// xvalues returns the value of a byte as a hexadecimal digit or 255. +var xvalues = [256]byte{ + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, +} + +// xtob converts hex characters x1 and x2 into a byte. +func xtob(x1, x2 byte) (byte, bool) { + b1 := xvalues[x1] + b2 := xvalues[x2] + return (b1 << 4) | b2, b1 != 255 && b2 != 255 +} diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go new file mode 100644 index 000000000..a57207aeb --- /dev/null +++ b/vendor/github.com/google/uuid/uuid.go @@ -0,0 +1,294 @@ +// Copyright 2018 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "bytes" + "crypto/rand" + "encoding/hex" + "errors" + "fmt" + "io" + "strings" + "sync" +) + +// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC +// 4122. +type UUID [16]byte + +// A Version represents a UUID's version. +type Version byte + +// A Variant represents a UUID's variant. +type Variant byte + +// Constants returned by Variant. +const ( + Invalid = Variant(iota) // Invalid UUID + RFC4122 // The variant specified in RFC4122 + Reserved // Reserved, NCS backward compatibility. + Microsoft // Reserved, Microsoft Corporation backward compatibility. + Future // Reserved for future definition. 
+) + +const randPoolSize = 16 * 16 + +var ( + rander = rand.Reader // random function + poolEnabled = false + poolMu sync.Mutex + poolPos = randPoolSize // protected with poolMu + pool [randPoolSize]byte // protected with poolMu +) + +type invalidLengthError struct{ len int } + +func (err invalidLengthError) Error() string { + return fmt.Sprintf("invalid UUID length: %d", err.len) +} + +// IsInvalidLengthError is matcher function for custom error invalidLengthError +func IsInvalidLengthError(err error) bool { + _, ok := err.(invalidLengthError) + return ok +} + +// Parse decodes s into a UUID or returns an error. Both the standard UUID +// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the +// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex +// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx. +func Parse(s string) (UUID, error) { + var uuid UUID + switch len(s) { + // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36: + + // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36 + 9: + if strings.ToLower(s[:9]) != "urn:uuid:" { + return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9]) + } + s = s[9:] + + // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} + case 36 + 2: + s = s[1:] + + // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + case 32: + var ok bool + for i := range uuid { + uuid[i], ok = xtob(s[i*2], s[i*2+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + } + return uuid, nil + default: + return uuid, invalidLengthError{len(s)} + } + // s is now at least 36 bytes long + // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return uuid, errors.New("invalid UUID format") + } + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34} { + v, ok := xtob(s[x], s[x+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + uuid[i] = v + } + 
return uuid, nil +} + +// ParseBytes is like Parse, except it parses a byte slice instead of a string. +func ParseBytes(b []byte) (UUID, error) { + var uuid UUID + switch len(b) { + case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) { + return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9]) + } + b = b[9:] + case 36 + 2: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} + b = b[1:] + case 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + var ok bool + for i := 0; i < 32; i += 2 { + uuid[i/2], ok = xtob(b[i], b[i+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + } + return uuid, nil + default: + return uuid, invalidLengthError{len(b)} + } + // s is now at least 36 bytes long + // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' { + return uuid, errors.New("invalid UUID format") + } + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34} { + v, ok := xtob(b[x], b[x+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + uuid[i] = v + } + return uuid, nil +} + +// MustParse is like Parse but panics if the string cannot be parsed. +// It simplifies safe initialization of global variables holding compiled UUIDs. +func MustParse(s string) UUID { + uuid, err := Parse(s) + if err != nil { + panic(`uuid: Parse(` + s + `): ` + err.Error()) + } + return uuid +} + +// FromBytes creates a new UUID from a byte slice. Returns an error if the slice +// does not have a length of 16. The bytes are copied from the slice. +func FromBytes(b []byte) (uuid UUID, err error) { + err = uuid.UnmarshalBinary(b) + return uuid, err +} + +// Must returns uuid if err is nil and panics otherwise. 
+func Must(uuid UUID, err error) UUID { + if err != nil { + panic(err) + } + return uuid +} + +// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// , or "" if uuid is invalid. +func (uuid UUID) String() string { + var buf [36]byte + encodeHex(buf[:], uuid) + return string(buf[:]) +} + +// URN returns the RFC 2141 URN form of uuid, +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid. +func (uuid UUID) URN() string { + var buf [36 + 9]byte + copy(buf[:], "urn:uuid:") + encodeHex(buf[9:], uuid) + return string(buf[:]) +} + +func encodeHex(dst []byte, uuid UUID) { + hex.Encode(dst, uuid[:4]) + dst[8] = '-' + hex.Encode(dst[9:13], uuid[4:6]) + dst[13] = '-' + hex.Encode(dst[14:18], uuid[6:8]) + dst[18] = '-' + hex.Encode(dst[19:23], uuid[8:10]) + dst[23] = '-' + hex.Encode(dst[24:], uuid[10:]) +} + +// Variant returns the variant encoded in uuid. +func (uuid UUID) Variant() Variant { + switch { + case (uuid[8] & 0xc0) == 0x80: + return RFC4122 + case (uuid[8] & 0xe0) == 0xc0: + return Microsoft + case (uuid[8] & 0xe0) == 0xe0: + return Future + default: + return Reserved + } +} + +// Version returns the version of uuid. +func (uuid UUID) Version() Version { + return Version(uuid[6] >> 4) +} + +func (v Version) String() string { + if v > 15 { + return fmt.Sprintf("BAD_VERSION_%d", v) + } + return fmt.Sprintf("VERSION_%d", v) +} + +func (v Variant) String() string { + switch v { + case RFC4122: + return "RFC4122" + case Reserved: + return "Reserved" + case Microsoft: + return "Microsoft" + case Future: + return "Future" + case Invalid: + return "Invalid" + } + return fmt.Sprintf("BadVariant%d", int(v)) +} + +// SetRand sets the random number generator to r, which implements io.Reader. +// If r.Read returns an error when the package requests random data then +// a panic will be issued. +// +// Calling SetRand with nil sets the random number generator to the default +// generator. 
+func SetRand(r io.Reader) { + if r == nil { + rander = rand.Reader + return + } + rander = r +} + +// EnableRandPool enables internal randomness pool used for Random +// (Version 4) UUID generation. The pool contains random bytes read from +// the random number generator on demand in batches. Enabling the pool +// may improve the UUID generation throughput significantly. +// +// Since the pool is stored on the Go heap, this feature may be a bad fit +// for security sensitive applications. +// +// Both EnableRandPool and DisableRandPool are not thread-safe and should +// only be called when there is no possibility that New or any other +// UUID Version 4 generation function will be called concurrently. +func EnableRandPool() { + poolEnabled = true +} + +// DisableRandPool disables the randomness pool if it was previously +// enabled with EnableRandPool. +// +// Both EnableRandPool and DisableRandPool are not thread-safe and should +// only be called when there is no possibility that New or any other +// UUID Version 4 generation function will be called concurrently. +func DisableRandPool() { + poolEnabled = false + defer poolMu.Unlock() + poolMu.Lock() + poolPos = randPoolSize +} diff --git a/vendor/github.com/google/uuid/version1.go b/vendor/github.com/google/uuid/version1.go new file mode 100644 index 000000000..463109629 --- /dev/null +++ b/vendor/github.com/google/uuid/version1.go @@ -0,0 +1,44 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" +) + +// NewUUID returns a Version 1 UUID based on the current NodeID and clock +// sequence, and the current time. If the NodeID has not been set by SetNodeID +// or SetNodeInterface then it will be set automatically. If the NodeID cannot +// be set NewUUID returns nil. If clock sequence has not been set by +// SetClockSequence then it will be set automatically. 
If GetTime fails to +// return the current NewUUID returns nil and an error. +// +// In most cases, New should be used. +func NewUUID() (UUID, error) { + var uuid UUID + now, seq, err := GetTime() + if err != nil { + return uuid, err + } + + timeLow := uint32(now & 0xffffffff) + timeMid := uint16((now >> 32) & 0xffff) + timeHi := uint16((now >> 48) & 0x0fff) + timeHi |= 0x1000 // Version 1 + + binary.BigEndian.PutUint32(uuid[0:], timeLow) + binary.BigEndian.PutUint16(uuid[4:], timeMid) + binary.BigEndian.PutUint16(uuid[6:], timeHi) + binary.BigEndian.PutUint16(uuid[8:], seq) + + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } + copy(uuid[10:], nodeID[:]) + nodeMu.Unlock() + + return uuid, nil +} diff --git a/vendor/github.com/google/uuid/version4.go b/vendor/github.com/google/uuid/version4.go new file mode 100644 index 000000000..7697802e4 --- /dev/null +++ b/vendor/github.com/google/uuid/version4.go @@ -0,0 +1,76 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import "io" + +// New creates a new random UUID or panics. New is equivalent to +// the expression +// +// uuid.Must(uuid.NewRandom()) +func New() UUID { + return Must(NewRandom()) +} + +// NewString creates a new random UUID and returns it as a string or panics. +// NewString is equivalent to the expression +// +// uuid.New().String() +func NewString() string { + return Must(NewRandom()).String() +} + +// NewRandom returns a Random (Version 4) UUID. +// +// The strength of the UUIDs is based on the strength of the crypto/rand +// package. +// +// Uses the randomness pool if it was enabled with EnableRandPool. +// +// A note about uniqueness derived from the UUID Wikipedia entry: +// +// Randomly generated UUIDs have 122 random bits. 
One's annual risk of being +// hit by a meteorite is estimated to be one chance in 17 billion, that +// means the probability is about 0.00000000006 (6 × 10−11), +// equivalent to the odds of creating a few tens of trillions of UUIDs in a +// year and having one duplicate. +func NewRandom() (UUID, error) { + if !poolEnabled { + return NewRandomFromReader(rander) + } + return newRandomFromPool() +} + +// NewRandomFromReader returns a UUID based on bytes read from a given io.Reader. +func NewRandomFromReader(r io.Reader) (UUID, error) { + var uuid UUID + _, err := io.ReadFull(r, uuid[:]) + if err != nil { + return Nil, err + } + uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 + uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 + return uuid, nil +} + +func newRandomFromPool() (UUID, error) { + var uuid UUID + poolMu.Lock() + if poolPos == randPoolSize { + _, err := io.ReadFull(rander, pool[:]) + if err != nil { + poolMu.Unlock() + return Nil, err + } + poolPos = 0 + } + copy(uuid[:], pool[poolPos:(poolPos+16)]) + poolPos += 16 + poolMu.Unlock() + + uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 + uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 + return uuid, nil +} diff --git a/vendor/github.com/hashicorp/go-checkpoint/LICENSE b/vendor/github.com/hashicorp/go-checkpoint/LICENSE new file mode 100644 index 000000000..c33dcc7c9 --- /dev/null +++ b/vendor/github.com/hashicorp/go-checkpoint/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. 
“Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. 
“Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. 
for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. 
such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. 
You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. 
+ Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. 
Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. 
+ diff --git a/vendor/github.com/hashicorp/go-checkpoint/README.md b/vendor/github.com/hashicorp/go-checkpoint/README.md new file mode 100644 index 000000000..e717b6ad3 --- /dev/null +++ b/vendor/github.com/hashicorp/go-checkpoint/README.md @@ -0,0 +1,22 @@ +# Go Checkpoint Client + +[Checkpoint](http://checkpoint.hashicorp.com) is an internal service at +Hashicorp that we use to check version information, broadcast security +bulletins, etc. + +We understand that software making remote calls over the internet +for any reason can be undesirable. Because of this, Checkpoint can be +disabled in all of our software that includes it. You can view the source +of this client to see that we're not sending any private information. + +Each Hashicorp application has it's specific configuration option +to disable checkpoint calls, but the `CHECKPOINT_DISABLE` makes +the underlying checkpoint component itself disabled. For example +in the case of packer: +``` +CHECKPOINT_DISABLE=1 packer build +``` + +**Note:** This repository is probably useless outside of internal HashiCorp +use. It is open source for disclosure and because our open source projects +must be able to link to it. diff --git a/vendor/github.com/hashicorp/go-checkpoint/check.go b/vendor/github.com/hashicorp/go-checkpoint/check.go new file mode 100644 index 000000000..109d0d352 --- /dev/null +++ b/vendor/github.com/hashicorp/go-checkpoint/check.go @@ -0,0 +1,368 @@ +package checkpoint + +import ( + crand "crypto/rand" + "encoding/binary" + "encoding/json" + "fmt" + "io" + "io/ioutil" + mrand "math/rand" + "net/http" + "net/url" + "os" + "path/filepath" + "reflect" + "runtime" + "strconv" + "strings" + "time" + + "github.com/hashicorp/go-cleanhttp" +) + +var magicBytes = [4]byte{0x35, 0x77, 0x69, 0xFB} + +// CheckParams are the parameters for configuring a check request. +type CheckParams struct { + // Product and version are used to lookup the correct product and + // alerts for the proper version. 
The version is also used to perform + // a version check. + Product string + Version string + + // Arch and OS are used to filter alerts potentially only to things + // affecting a specific os/arch combination. If these aren't specified, + // they'll be automatically filled in. + Arch string + OS string + + // Signature is some random signature that should be stored and used + // as a cookie-like value. This ensures that alerts aren't repeated. + // If the signature is changed, repeat alerts may be sent down. The + // signature should NOT be anything identifiable to a user (such as + // a MAC address). It should be random. + // + // If SignatureFile is given, then the signature will be read from this + // file. If the file doesn't exist, then a random signature will + // automatically be generated and stored here. SignatureFile will be + // ignored if Signature is given. + Signature string + SignatureFile string + + // CacheFile, if specified, will cache the result of a check. The + // duration of the cache is specified by CacheDuration, and defaults + // to 48 hours if not specified. If the CacheFile is newer than the + // CacheDuration, than the Check will short-circuit and use those + // results. + // + // If the CacheFile directory doesn't exist, it will be created with + // permissions 0755. + CacheFile string + CacheDuration time.Duration + + // Force, if true, will force the check even if CHECKPOINT_DISABLE + // is set. Within HashiCorp products, this is ONLY USED when the user + // specifically requests it. This is never automatically done without + // the user's consent. + Force bool +} + +// CheckResponse is the response for a check request. 
+type CheckResponse struct { + Product string `json:"product"` + CurrentVersion string `json:"current_version"` + CurrentReleaseDate int `json:"current_release_date"` + CurrentDownloadURL string `json:"current_download_url"` + CurrentChangelogURL string `json:"current_changelog_url"` + ProjectWebsite string `json:"project_website"` + Outdated bool `json:"outdated"` + Alerts []*CheckAlert `json:"alerts"` +} + +// CheckAlert is a single alert message from a check request. +// +// These never have to be manually constructed, and are typically populated +// into a CheckResponse as a result of the Check request. +type CheckAlert struct { + ID int `json:"id"` + Date int `json:"date"` + Message string `json:"message"` + URL string `json:"url"` + Level string `json:"level"` +} + +// Check checks for alerts and new version information. +func Check(p *CheckParams) (*CheckResponse, error) { + if disabled := os.Getenv("CHECKPOINT_DISABLE"); disabled != "" && !p.Force { + return &CheckResponse{}, nil + } + + // Set a default timeout of 3 sec for the check request (in milliseconds) + timeout := 3000 + if _, err := strconv.Atoi(os.Getenv("CHECKPOINT_TIMEOUT")); err == nil { + timeout, _ = strconv.Atoi(os.Getenv("CHECKPOINT_TIMEOUT")) + } + + // If we have a cached result, then use that + if r, err := checkCache(p.Version, p.CacheFile, p.CacheDuration); err != nil { + return nil, err + } else if r != nil { + defer r.Close() + return checkResult(r) + } + + var u url.URL + + if p.Arch == "" { + p.Arch = runtime.GOARCH + } + if p.OS == "" { + p.OS = runtime.GOOS + } + + // If we're given a SignatureFile, then attempt to read that. 
+ signature := p.Signature + if p.Signature == "" && p.SignatureFile != "" { + var err error + signature, err = checkSignature(p.SignatureFile) + if err != nil { + return nil, err + } + } + + v := u.Query() + v.Set("version", p.Version) + v.Set("arch", p.Arch) + v.Set("os", p.OS) + v.Set("signature", signature) + + u.Scheme = "https" + u.Host = "checkpoint-api.hashicorp.com" + u.Path = fmt.Sprintf("/v1/check/%s", p.Product) + u.RawQuery = v.Encode() + + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return nil, err + } + req.Header.Set("Accept", "application/json") + req.Header.Set("User-Agent", "HashiCorp/go-checkpoint") + + client := cleanhttp.DefaultClient() + + // We use a short timeout since checking for new versions is not critical + // enough to block on if checkpoint is broken/slow. + client.Timeout = time.Duration(timeout) * time.Millisecond + + resp, err := client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode != 200 { + return nil, fmt.Errorf("Unknown status: %d", resp.StatusCode) + } + + var r io.Reader = resp.Body + if p.CacheFile != "" { + // Make sure the directory holding our cache exists. + if err := os.MkdirAll(filepath.Dir(p.CacheFile), 0755); err != nil { + return nil, err + } + + // We have to cache the result, so write the response to the + // file as we read it. + f, err := os.Create(p.CacheFile) + if err != nil { + return nil, err + } + + // Write the cache header + if err := writeCacheHeader(f, p.Version); err != nil { + f.Close() + os.Remove(p.CacheFile) + return nil, err + } + + defer f.Close() + r = io.TeeReader(r, f) + } + + return checkResult(r) +} + +// CheckInterval is used to check for a response on a given interval duration. +// The interval is not exact, and checks are randomized to prevent a thundering +// herd. However, it is expected that on average one check is performed per +// interval. The returned channel may be closed to stop background checks. 
+func CheckInterval(p *CheckParams, interval time.Duration, cb func(*CheckResponse, error)) chan struct{} { + doneCh := make(chan struct{}) + + if disabled := os.Getenv("CHECKPOINT_DISABLE"); disabled != "" { + return doneCh + } + + go func() { + for { + select { + case <-time.After(randomStagger(interval)): + resp, err := Check(p) + cb(resp, err) + case <-doneCh: + return + } + } + }() + + return doneCh +} + +// randomStagger returns an interval that is between 3/4 and 5/4 of +// the given interval. The expected value is the interval. +func randomStagger(interval time.Duration) time.Duration { + stagger := time.Duration(mrand.Int63()) % (interval / 2) + return 3*(interval/4) + stagger +} + +func checkCache(current string, path string, d time.Duration) (io.ReadCloser, error) { + fi, err := os.Stat(path) + if err != nil { + if os.IsNotExist(err) { + // File doesn't exist, not a problem + return nil, nil + } + + return nil, err + } + + if d == 0 { + d = 48 * time.Hour + } + + if fi.ModTime().Add(d).Before(time.Now()) { + // Cache is busted, delete the old file and re-request. We ignore + // errors here because re-creating the file is fine too. + os.Remove(path) + return nil, nil + } + + // File looks good so far, open it up so we can inspect the contents. + f, err := os.Open(path) + if err != nil { + return nil, err + } + + // Check the signature of the file + var sig [4]byte + if err := binary.Read(f, binary.LittleEndian, sig[:]); err != nil { + f.Close() + return nil, err + } + if !reflect.DeepEqual(sig, magicBytes) { + // Signatures don't match. Reset. + f.Close() + return nil, nil + } + + // Check the version. 
If it changed, then rewrite + var length uint32 + if err := binary.Read(f, binary.LittleEndian, &length); err != nil { + f.Close() + return nil, err + } + data := make([]byte, length) + if _, err := io.ReadFull(f, data); err != nil { + f.Close() + return nil, err + } + if string(data) != current { + // Version changed, reset + f.Close() + return nil, nil + } + + return f, nil +} +func checkResult(r io.Reader) (*CheckResponse, error) { + var result CheckResponse + if err := json.NewDecoder(r).Decode(&result); err != nil { + return nil, err + } + return &result, nil +} + +func checkSignature(path string) (string, error) { + _, err := os.Stat(path) + if err == nil { + // The file exists, read it out + sigBytes, err := ioutil.ReadFile(path) + if err != nil { + return "", err + } + + // Split the file into lines + lines := strings.SplitN(string(sigBytes), "\n", 2) + if len(lines) > 0 { + return strings.TrimSpace(lines[0]), nil + } + } + + // If this isn't a non-exist error, then return that. + if !os.IsNotExist(err) { + return "", err + } + + // The file doesn't exist, so create a signature. + var b [16]byte + n := 0 + for n < 16 { + n2, err := crand.Read(b[n:]) + if err != nil { + return "", err + } + + n += n2 + } + signature := fmt.Sprintf( + "%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:]) + + // Make sure the directory holding our signature exists. 
+ if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + return "", err + } + + // Write the signature + if err := ioutil.WriteFile(path, []byte(signature+"\n\n"+userMessage+"\n"), 0644); err != nil { + return "", err + } + + return signature, nil +} + +func writeCacheHeader(f io.Writer, v string) error { + // Write our signature first + if err := binary.Write(f, binary.LittleEndian, magicBytes); err != nil { + return err + } + + // Write out our current version length + length := uint32(len(v)) + if err := binary.Write(f, binary.LittleEndian, length); err != nil { + return err + } + + _, err := f.Write([]byte(v)) + return err +} + +// userMessage is suffixed to the signature file to provide feedback. +var userMessage = ` +This signature is a randomly generated UUID used to de-duplicate +alerts and version information. This signature is random, it is +not based on any personally identifiable information. To create +a new signature, you can simply delete this file at any time. +See the documentation for the software using Checkpoint for more +information on how to disable it. +` diff --git a/vendor/github.com/hashicorp/go-checkpoint/telemetry.go b/vendor/github.com/hashicorp/go-checkpoint/telemetry.go new file mode 100644 index 000000000..b9ee62983 --- /dev/null +++ b/vendor/github.com/hashicorp/go-checkpoint/telemetry.go @@ -0,0 +1,118 @@ +package checkpoint + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "net/http" + "net/url" + "os" + "runtime" + "time" + + "github.com/hashicorp/go-cleanhttp" + uuid "github.com/hashicorp/go-uuid" +) + +// ReportParams are the parameters for configuring a telemetry report. +type ReportParams struct { + // Signature is some random signature that should be stored and used + // as a cookie-like value. This ensures that alerts aren't repeated. + // If the signature is changed, repeat alerts may be sent down. The + // signature should NOT be anything identifiable to a user (such as + // a MAC address). 
It should be random. + // + // If SignatureFile is given, then the signature will be read from this + // file. If the file doesn't exist, then a random signature will + // automatically be generated and stored here. SignatureFile will be + // ignored if Signature is given. + Signature string `json:"signature"` + SignatureFile string `json:"-"` + + StartTime time.Time `json:"start_time"` + EndTime time.Time `json:"end_time"` + Arch string `json:"arch"` + OS string `json:"os"` + Payload interface{} `json:"payload,omitempty"` + Product string `json:"product"` + RunID string `json:"run_id"` + SchemaVersion string `json:"schema_version"` + Version string `json:"version"` +} + +func (i *ReportParams) signature() string { + signature := i.Signature + if i.Signature == "" && i.SignatureFile != "" { + var err error + signature, err = checkSignature(i.SignatureFile) + if err != nil { + return "" + } + } + return signature +} + +// Report sends telemetry information to checkpoint +func Report(ctx context.Context, r *ReportParams) error { + if disabled := os.Getenv("CHECKPOINT_DISABLE"); disabled != "" { + return nil + } + + req, err := ReportRequest(r) + if err != nil { + return err + } + + client := cleanhttp.DefaultClient() + resp, err := client.Do(req.WithContext(ctx)) + if err != nil { + return err + } + if resp.StatusCode != 201 { + return fmt.Errorf("Unknown status: %d", resp.StatusCode) + } + + return nil +} + +// ReportRequest creates a request object for making a report +func ReportRequest(r *ReportParams) (*http.Request, error) { + // Populate some fields automatically if we can + if r.RunID == "" { + uuid, err := uuid.GenerateUUID() + if err != nil { + return nil, err + } + r.RunID = uuid + } + if r.Arch == "" { + r.Arch = runtime.GOARCH + } + if r.OS == "" { + r.OS = runtime.GOOS + } + if r.Signature == "" { + r.Signature = r.signature() + } + + b, err := json.Marshal(r) + if err != nil { + return nil, err + } + + u := &url.URL{ + Scheme: "https", + Host: 
"checkpoint-api.hashicorp.com", + Path: fmt.Sprintf("/v1/telemetry/%s", r.Product), + } + + req, err := http.NewRequest("POST", u.String(), bytes.NewReader(b)) + if err != nil { + return nil, err + } + req.Header.Set("Accept", "application/json") + req.Header.Set("User-Agent", "HashiCorp/go-checkpoint") + + return req, nil +} diff --git a/vendor/github.com/hashicorp/go-checkpoint/versions.go b/vendor/github.com/hashicorp/go-checkpoint/versions.go new file mode 100644 index 000000000..a5b0d3b32 --- /dev/null +++ b/vendor/github.com/hashicorp/go-checkpoint/versions.go @@ -0,0 +1,90 @@ +package checkpoint + +import ( + "encoding/json" + "fmt" + "net/http" + "net/url" + "os" + "strconv" + "time" + + "github.com/hashicorp/go-cleanhttp" +) + +// VersionsParams are the parameters for a versions request. +type VersionsParams struct { + // Service is used to lookup the correct service. + Service string + + // Product is used to filter the version contraints. + Product string + + // Force, if true, will force the check even if CHECKPOINT_DISABLE + // is set. Within HashiCorp products, this is ONLY USED when the user + // specifically requests it. This is never automatically done without + // the user's consent. + Force bool +} + +// VersionsResponse is the response for a versions request. +type VersionsResponse struct { + Service string `json:"service"` + Product string `json:"product"` + Minimum string `json:"minimum"` + Maximum string `json:"maximum"` + Excluding []string `json:"excluding"` +} + +// Versions returns the version constrains for a given service and product. 
+func Versions(p *VersionsParams) (*VersionsResponse, error) { + if disabled := os.Getenv("CHECKPOINT_DISABLE"); disabled != "" && !p.Force { + return &VersionsResponse{}, nil + } + + // Set a default timeout of 1 sec for the versions request (in milliseconds) + timeout := 1000 + if _, err := strconv.Atoi(os.Getenv("CHECKPOINT_TIMEOUT")); err == nil { + timeout, _ = strconv.Atoi(os.Getenv("CHECKPOINT_TIMEOUT")) + } + + v := url.Values{} + v.Set("product", p.Product) + + u := &url.URL{ + Scheme: "https", + Host: "checkpoint-api.hashicorp.com", + Path: fmt.Sprintf("/v1/versions/%s", p.Service), + RawQuery: v.Encode(), + } + + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return nil, err + } + req.Header.Set("Accept", "application/json") + req.Header.Set("User-Agent", "HashiCorp/go-checkpoint") + + client := cleanhttp.DefaultClient() + + // We use a short timeout since checking for new versions is not critical + // enough to block on if checkpoint is broken/slow. + client.Timeout = time.Duration(timeout) * time.Millisecond + + resp, err := client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode != 200 { + return nil, fmt.Errorf("Unknown status: %d", resp.StatusCode) + } + + result := &VersionsResponse{} + if err := json.NewDecoder(resp.Body).Decode(result); err != nil { + return nil, err + } + + return result, nil +} diff --git a/vendor/github.com/hashicorp/go-cleanhttp/LICENSE b/vendor/github.com/hashicorp/go-cleanhttp/LICENSE new file mode 100644 index 000000000..e87a115e4 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. 
"Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. 
"Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. 
+ Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. 
Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. 
However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. 
Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. 
Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. 
+ diff --git a/vendor/github.com/hashicorp/go-cleanhttp/README.md b/vendor/github.com/hashicorp/go-cleanhttp/README.md new file mode 100644 index 000000000..036e5313f --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/README.md @@ -0,0 +1,30 @@ +# cleanhttp + +Functions for accessing "clean" Go http.Client values + +------------- + +The Go standard library contains a default `http.Client` called +`http.DefaultClient`. It is a common idiom in Go code to start with +`http.DefaultClient` and tweak it as necessary, and in fact, this is +encouraged; from the `http` package documentation: + +> The Client's Transport typically has internal state (cached TCP connections), +so Clients should be reused instead of created as needed. Clients are safe for +concurrent use by multiple goroutines. + +Unfortunately, this is a shared value, and it is not uncommon for libraries to +assume that they are free to modify it at will. With enough dependencies, it +can be very easy to encounter strange problems and race conditions due to +manipulation of this shared value across libraries and goroutines (clients are +safe for concurrent use, but writing values to the client struct itself is not +protected). + +Making things worse is the fact that a bare `http.Client` will use a default +`http.Transport` called `http.DefaultTransport`, which is another global value +that behaves the same way. So it is not simply enough to replace +`http.DefaultClient` with `&http.Client{}`. + +This repository provides some simple functions to get a "clean" `http.Client` +-- one that uses the same default values as the Go standard library, but +returns a client that does not share any state with other clients. 
diff --git a/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go b/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go new file mode 100644 index 000000000..fe28d15b6 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go @@ -0,0 +1,58 @@ +package cleanhttp + +import ( + "net" + "net/http" + "runtime" + "time" +) + +// DefaultTransport returns a new http.Transport with similar default values to +// http.DefaultTransport, but with idle connections and keepalives disabled. +func DefaultTransport() *http.Transport { + transport := DefaultPooledTransport() + transport.DisableKeepAlives = true + transport.MaxIdleConnsPerHost = -1 + return transport +} + +// DefaultPooledTransport returns a new http.Transport with similar default +// values to http.DefaultTransport. Do not use this for transient transports as +// it can leak file descriptors over time. Only use this for transports that +// will be re-used for the same host(s). +func DefaultPooledTransport() *http.Transport { + transport := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + ForceAttemptHTTP2: true, + MaxIdleConnsPerHost: runtime.GOMAXPROCS(0) + 1, + } + return transport +} + +// DefaultClient returns a new http.Client with similar default values to +// http.Client, but with a non-shared Transport, idle connections disabled, and +// keepalives disabled. +func DefaultClient() *http.Client { + return &http.Client{ + Transport: DefaultTransport(), + } +} + +// DefaultPooledClient returns a new http.Client with similar default values to +// http.Client, but with a shared Transport. Do not use this function for +// transient clients as it can leak file descriptors over time. 
Only use this +// for clients that will be re-used for the same host(s). +func DefaultPooledClient() *http.Client { + return &http.Client{ + Transport: DefaultPooledTransport(), + } +} diff --git a/vendor/github.com/hashicorp/go-cleanhttp/doc.go b/vendor/github.com/hashicorp/go-cleanhttp/doc.go new file mode 100644 index 000000000..05841092a --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/doc.go @@ -0,0 +1,20 @@ +// Package cleanhttp offers convenience utilities for acquiring "clean" +// http.Transport and http.Client structs. +// +// Values set on http.DefaultClient and http.DefaultTransport affect all +// callers. This can have detrimental effects, especially in TLS contexts, +// where client or root certificates set to talk to multiple endpoints can end +// up displacing each other, leading to hard-to-debug issues. This package +// provides non-shared http.Client and http.Transport structs to ensure that +// the configuration will not be overwritten by other parts of the application +// or dependencies. +// +// The DefaultClient and DefaultTransport functions disable idle connections +// and keepalives. Without ensuring that idle connections are closed before +// garbage collection, short-term clients/transports can leak file descriptors, +// eventually leading to "too many open files" errors. If you will be +// connecting to the same hosts repeatedly from the same client, you can use +// DefaultPooledClient to receive a client that has connection pooling +// semantics similar to http.DefaultClient. 
+// +package cleanhttp diff --git a/vendor/github.com/hashicorp/go-cleanhttp/handlers.go b/vendor/github.com/hashicorp/go-cleanhttp/handlers.go new file mode 100644 index 000000000..3c845dc0d --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/handlers.go @@ -0,0 +1,48 @@ +package cleanhttp + +import ( + "net/http" + "strings" + "unicode" +) + +// HandlerInput provides input options to cleanhttp's handlers +type HandlerInput struct { + ErrStatus int +} + +// PrintablePathCheckHandler is a middleware that ensures the request path +// contains only printable runes. +func PrintablePathCheckHandler(next http.Handler, input *HandlerInput) http.Handler { + // Nil-check on input to make it optional + if input == nil { + input = &HandlerInput{ + ErrStatus: http.StatusBadRequest, + } + } + + // Default to http.StatusBadRequest on error + if input.ErrStatus == 0 { + input.ErrStatus = http.StatusBadRequest + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r != nil { + // Check URL path for non-printable characters + idx := strings.IndexFunc(r.URL.Path, func(c rune) bool { + return !unicode.IsPrint(c) + }) + + if idx != -1 { + w.WriteHeader(input.ErrStatus) + return + } + + if next != nil { + next.ServeHTTP(w, r) + } + } + + return + }) +} diff --git a/vendor/github.com/hashicorp/hc-install/.go-version b/vendor/github.com/hashicorp/hc-install/.go-version new file mode 100644 index 000000000..b9a05a6dc --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/.go-version @@ -0,0 +1 @@ +1.17.3 diff --git a/vendor/github.com/hashicorp/hc-install/.goreleaser.yml b/vendor/github.com/hashicorp/hc-install/.goreleaser.yml new file mode 100644 index 000000000..5e5832867 --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/.goreleaser.yml @@ -0,0 +1,29 @@ +project_name: tfinstall +builds: + - env: + - CGO_ENABLED=0 + main: ./cmd/hcinstall/main.go + mod_timestamp: '{{ .CommitTimestamp }}' + id: "tfinstall" + binary: tfinstall + flags: + - 
-trimpath + ldflags: + - '-s -w -X main.version={{.Version}} -X main.commit={{.Commit}}' + goos: + - linux + - darwin + - windows + goarch: + - amd64 + - arm + - arm64 +archives: + - files: [] + format: zip + name_template: '{{ .ProjectName }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}' +checksum: + name_template: '{{ .ProjectName }}_{{ .Version }}_SHA256SUMS' + algorithm: sha256 +changelog: + skip: true diff --git a/vendor/github.com/hashicorp/hc-install/LICENSE b/vendor/github.com/hashicorp/hc-install/LICENSE new file mode 100644 index 000000000..a612ad981 --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. 
"License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. 
Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. 
Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. 
Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. 
However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. 
Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. 
* +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. 
Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/hc-install/README.md b/vendor/github.com/hashicorp/hc-install/README.md new file mode 100644 index 000000000..87c06a203 --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/README.md @@ -0,0 +1,133 @@ +# hc-install + +An **experimental** Go module for downloading or locating HashiCorp binaries, verifying signatures and checksums, and asserting version constraints. + +This module is a successor to tfinstall, available in pre-1.0 versions of [terraform-exec](https://github.com/hashicorp/terraform-exec). Current users of tfinstall are advised to move to hc-install before upgrading terraform-exec to v1.0.0. + +## hc-install is not a package manager + +This library is intended for use within Go programs or automated environments (such as CIs) +which have some business downloading or otherwise locating HashiCorp binaries. + +The included command-line utility, `hc-install`, is a convenient way of using +the library in ad-hoc or CI shell scripting outside of Go. + +`hc-install` does **not**: + + - Determine suitable installation path based on target system. e.g. in `/usr/bin` or `/usr/local/bin` on Unix based system. + - Deal with execution of installed binaries (via service files or otherwise). + - Upgrade existing binaries on your system. + - Add nor link downloaded binaries to your `$PATH`. + +## API + +The `Installer` offers a few high-level methods: + + - `Ensure(context.Context, []src.Source)` to find, install, or build a product version + - `Install(context.Context, []src.Installable)` to install a product version + +### Sources + +The `Installer` methods accept a number of different `Source` types. 
+Each comes with different trade-offs described below. + + - `fs.{AnyVersion,ExactVersion}` - Finds a binary in `$PATH` (or additional paths) + - **Pros:** + - This is most convenient when you already have the product installed on your system + which you already manage. + - **Cons:** + - Only relies on a single version, expects _you_ to manage the installation + - _Not recommended_ for any environment where product installation is not controlled or managed by you (e.g. default GitHub Actions image managed by GitHub) + - `releases.{LatestVersion,ExactVersion}` - Downloads, verifies & installs any known product from `releases.hashicorp.com` + - **Pros:** + - Fast and reliable way of obtaining any pre-built version of any product + - **Cons:** + - Installation may consume some bandwidth, disk space and a little time + - Potentially less stable builds (see `checkpoint` below) + - `checkpoint.{LatestVersion}` - Downloads, verifies & installs any known product available in HashiCorp Checkpoint + - **Pros:** + - Checkpoint typically contains only product versions considered stable + - **Cons:** + - Installation may consume some bandwidth, disk space and a little time + - Currently doesn't allow installation of old versions (see `releases` above) + - `build.{GitRevision}` - Clones raw source code and builds the product from it + - **Pros:** + - Useful for catching bugs and incompatibilities as early as possible (prior to product release). 
+ - **Cons:** + - Building from scratch can consume significant amount of time & resources (CPU, memory, bandwith, disk space) + - There are no guarantees that build instructions will always be up-to-date + - There's increased likelihood of build containing bugs prior to release + - Any CI builds relying on this are likely to be fragile + +## Example Usage + +### Install single version + +```go +TODO +``` + +### Find or install single version + +```go +i := NewInstaller() + +v0_14_0 := version.Must(version.NewVersion("0.14.0")) + +execPath, err := i.Ensure(context.Background(), []src.Source{ + &fs.ExactVersion{ + Product: product.Terraform, + Version: v0_14_0, + }, + &releases.ExactVersion{ + Product: product.Terraform, + Version: v0_14_0, + }, +}) +if err != nil { + // process err +} + +// run any tests + +defer i.Remove() +``` + +### Install multiple versions + +```go +TODO +``` + +### Install and build multiple versions + +```go +i := NewInstaller() + +vc, _ := version.NewConstraint(">= 0.12") +rv := &releases.Versions{ + Product: product.Terraform, + Constraints: vc, +} + +versions, err := rv.List(context.Background()) +if err != nil { + return err +} +versions = append(versions, &build.GitRevision{Ref: "HEAD"}) + +for _, installableVersion := range versions { + execPath, err := i.Ensure(context.Background(), []src.Source{ + installableVersion, + }) + if err != nil { + return err + } + + // Do some testing here + _ = execPath + + // clean up + os.Remove(execPath) +} +``` diff --git a/vendor/github.com/hashicorp/hc-install/checkpoint/latest_version.go b/vendor/github.com/hashicorp/hc-install/checkpoint/latest_version.go new file mode 100644 index 000000000..04fa24160 --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/checkpoint/latest_version.go @@ -0,0 +1,154 @@ +package checkpoint + +import ( + "context" + "fmt" + "io/ioutil" + "log" + "os" + "path/filepath" + "runtime" + "time" + + checkpoint "github.com/hashicorp/go-checkpoint" + 
"github.com/hashicorp/go-version" + "github.com/hashicorp/hc-install/internal/pubkey" + rjson "github.com/hashicorp/hc-install/internal/releasesjson" + isrc "github.com/hashicorp/hc-install/internal/src" + "github.com/hashicorp/hc-install/internal/validators" + "github.com/hashicorp/hc-install/product" +) + +var ( + defaultTimeout = 30 * time.Second + discardLogger = log.New(ioutil.Discard, "", 0) +) + +// LatestVersion installs the latest version known to Checkpoint +// to OS temp directory, or to InstallDir (if not empty) +type LatestVersion struct { + Product product.Product + Timeout time.Duration + SkipChecksumVerification bool + InstallDir string + + // ArmoredPublicKey is a public PGP key in ASCII/armor format to use + // instead of built-in pubkey to verify signature of downloaded checksums + ArmoredPublicKey string + + logger *log.Logger + pathsToRemove []string +} + +func (*LatestVersion) IsSourceImpl() isrc.InstallSrcSigil { + return isrc.InstallSrcSigil{} +} + +func (lv *LatestVersion) SetLogger(logger *log.Logger) { + lv.logger = logger +} + +func (lv *LatestVersion) log() *log.Logger { + if lv.logger == nil { + return discardLogger + } + return lv.logger +} + +func (lv *LatestVersion) Validate() error { + if !validators.IsProductNameValid(lv.Product.Name) { + return fmt.Errorf("invalid product name: %q", lv.Product.Name) + } + if !validators.IsBinaryNameValid(lv.Product.BinaryName()) { + return fmt.Errorf("invalid binary name: %q", lv.Product.BinaryName()) + } + + return nil +} + +func (lv *LatestVersion) Install(ctx context.Context) (string, error) { + timeout := defaultTimeout + if lv.Timeout > 0 { + timeout = lv.Timeout + } + ctx, cancelFunc := context.WithTimeout(ctx, timeout) + defer cancelFunc() + + // TODO: Introduce CheckWithContext to allow for cancellation + resp, err := checkpoint.Check(&checkpoint.CheckParams{ + Product: lv.Product.Name, + OS: runtime.GOOS, + Arch: runtime.GOARCH, + Force: true, + }) + if err != nil { + return "", err + } 
+ + latestVersion, err := version.NewVersion(resp.CurrentVersion) + if err != nil { + return "", err + } + + if lv.pathsToRemove == nil { + lv.pathsToRemove = make([]string, 0) + } + + dstDir := lv.InstallDir + if dstDir == "" { + var err error + dirName := fmt.Sprintf("%s_*", lv.Product.Name) + dstDir, err = ioutil.TempDir("", dirName) + if err != nil { + return "", err + } + lv.pathsToRemove = append(lv.pathsToRemove, dstDir) + lv.log().Printf("created new temp dir at %s", dstDir) + } + lv.log().Printf("will install into dir at %s", dstDir) + + rels := rjson.NewReleases() + rels.SetLogger(lv.log()) + pv, err := rels.GetProductVersion(ctx, lv.Product.Name, latestVersion) + if err != nil { + return "", err + } + + d := &rjson.Downloader{ + Logger: lv.log(), + VerifyChecksum: !lv.SkipChecksumVerification, + ArmoredPublicKey: pubkey.DefaultPublicKey, + BaseURL: rels.BaseURL, + } + if lv.ArmoredPublicKey != "" { + d.ArmoredPublicKey = lv.ArmoredPublicKey + } + err = d.DownloadAndUnpack(ctx, pv, dstDir) + if err != nil { + return "", err + } + + execPath := filepath.Join(dstDir, lv.Product.BinaryName()) + + lv.pathsToRemove = append(lv.pathsToRemove, execPath) + + lv.log().Printf("changing perms of %s", execPath) + err = os.Chmod(execPath, 0o700) + if err != nil { + return "", err + } + + return execPath, nil +} + +func (lv *LatestVersion) Remove(ctx context.Context) error { + if lv.pathsToRemove != nil { + for _, path := range lv.pathsToRemove { + err := os.RemoveAll(path) + if err != nil { + return err + } + } + } + return nil +} diff --git a/vendor/github.com/hashicorp/hc-install/errors/errors.go b/vendor/github.com/hashicorp/hc-install/errors/errors.go new file mode 100644 index 000000000..8d4f1d22d --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/errors/errors.go @@ -0,0 +1,18 @@ +package errors + +type skippableErr struct { + Err error +} + +func (e skippableErr) Error() string { + return e.Err.Error() +} + +func SkippableErr(err error) skippableErr { + 
return skippableErr{Err: err} +} + +func IsErrorSkippable(err error) bool { + _, ok := err.(skippableErr) + return ok +} diff --git a/vendor/github.com/hashicorp/hc-install/fs/any_version.go b/vendor/github.com/hashicorp/hc-install/fs/any_version.go new file mode 100644 index 000000000..fc1f94634 --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/fs/any_version.go @@ -0,0 +1,95 @@ +package fs + +import ( + "context" + "fmt" + "log" + "path/filepath" + + "github.com/hashicorp/hc-install/errors" + "github.com/hashicorp/hc-install/internal/src" + "github.com/hashicorp/hc-install/internal/validators" + "github.com/hashicorp/hc-install/product" +) + +// AnyVersion finds an executable binary of any version +// either defined by ExactBinPath, or as part of Product. +// +// When ExactBinPath is used, the source is skipped when +// the binary is not found or accessible/executable. +// +// When Product is used, binary name is looked up within system $PATH +// and any declared ExtraPaths (which are *appended* to +// any directories in $PATH). Source is skipped if no binary +// is found or accessible/executable. 
+type AnyVersion struct { + // Product represents the product (its binary name to look up), + // conflicts with ExactBinPath + Product *product.Product + + // ExtraPaths represents additional dir paths to be appended to + // the default system $PATH, conflicts with ExactBinPath + ExtraPaths []string + + // ExactBinPath represents exact path to the binary, + // conflicts with Product and ExtraPaths + ExactBinPath string + + logger *log.Logger +} + +func (*AnyVersion) IsSourceImpl() src.InstallSrcSigil { + return src.InstallSrcSigil{} +} + +func (av *AnyVersion) Validate() error { + if av.ExactBinPath == "" && av.Product == nil { + return fmt.Errorf("must use either ExactBinPath or Product + ExtraPaths") + } + if av.ExactBinPath != "" && (av.Product != nil || len(av.ExtraPaths) > 0) { + return fmt.Errorf("use either ExactBinPath or Product + ExtraPaths, not both") + } + if av.ExactBinPath != "" && !filepath.IsAbs(av.ExactBinPath) { + return fmt.Errorf("expected ExactBinPath (%q) to be an absolute path", av.ExactBinPath) + } + if av.Product != nil && !validators.IsBinaryNameValid(av.Product.BinaryName()) { + return fmt.Errorf("invalid binary name: %q", av.Product.BinaryName()) + } + return nil +} + +func (av *AnyVersion) SetLogger(logger *log.Logger) { + av.logger = logger +} + +func (av *AnyVersion) log() *log.Logger { + if av.logger == nil { + return discardLogger + } + return av.logger +} + +func (av *AnyVersion) Find(ctx context.Context) (string, error) { + if av.ExactBinPath != "" { + err := checkExecutable(av.ExactBinPath) + if err != nil { + return "", errors.SkippableErr(err) + } + + return av.ExactBinPath, nil + } + + execPath, err := findFile(lookupDirs(av.ExtraPaths), av.Product.BinaryName(), checkExecutable) + if err != nil { + return "", errors.SkippableErr(err) + } + + if !filepath.IsAbs(execPath) { + var err error + execPath, err = filepath.Abs(execPath) + if err != nil { + return "", errors.SkippableErr(err) + } + } + return execPath, nil +} diff --git 
a/vendor/github.com/hashicorp/hc-install/fs/exact_version.go b/vendor/github.com/hashicorp/hc-install/fs/exact_version.go new file mode 100644 index 000000000..018c1fbad --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/fs/exact_version.go @@ -0,0 +1,95 @@ +package fs + +import ( + "context" + "fmt" + "log" + "path/filepath" + "time" + + "github.com/hashicorp/go-version" + "github.com/hashicorp/hc-install/errors" + "github.com/hashicorp/hc-install/internal/src" + "github.com/hashicorp/hc-install/internal/validators" + "github.com/hashicorp/hc-install/product" +) + +// ExactVersion finds the first executable binary of the product name +// which matches the Version within system $PATH and any declared ExtraPaths +// (which are *appended* to any directories in $PATH) +type ExactVersion struct { + Product product.Product + Version *version.Version + ExtraPaths []string + Timeout time.Duration + + logger *log.Logger +} + +func (*ExactVersion) IsSourceImpl() src.InstallSrcSigil { + return src.InstallSrcSigil{} +} + +func (ev *ExactVersion) SetLogger(logger *log.Logger) { + ev.logger = logger +} + +func (ev *ExactVersion) log() *log.Logger { + if ev.logger == nil { + return discardLogger + } + return ev.logger +} + +func (ev *ExactVersion) Validate() error { + if !validators.IsBinaryNameValid(ev.Product.BinaryName()) { + return fmt.Errorf("invalid binary name: %q", ev.Product.BinaryName()) + } + if ev.Version == nil { + return fmt.Errorf("undeclared version") + } + if ev.Product.GetVersion == nil { + return fmt.Errorf("undeclared version getter") + } + return nil +} + +func (ev *ExactVersion) Find(ctx context.Context) (string, error) { + timeout := defaultTimeout + if ev.Timeout > 0 { + timeout = ev.Timeout + } + ctx, cancelFunc := context.WithTimeout(ctx, timeout) + defer cancelFunc() + + execPath, err := findFile(lookupDirs(ev.ExtraPaths), ev.Product.BinaryName(), func(file string) error { + err := checkExecutable(file) + if err != nil { + return err + } + + v, 
err := ev.Product.GetVersion(ctx, file) + if err != nil { + return err + } + + if !ev.Version.Equal(v) { + return fmt.Errorf("version (%s) doesn't match %s", v, ev.Version) + } + + return nil + }) + if err != nil { + return "", errors.SkippableErr(err) + } + + if !filepath.IsAbs(execPath) { + var err error + execPath, err = filepath.Abs(execPath) + if err != nil { + return "", errors.SkippableErr(err) + } + } + + return execPath, nil +} diff --git a/vendor/github.com/hashicorp/hc-install/fs/fs.go b/vendor/github.com/hashicorp/hc-install/fs/fs.go new file mode 100644 index 000000000..5adb9c329 --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/fs/fs.go @@ -0,0 +1,14 @@ +package fs + +import ( + "io/ioutil" + "log" + "time" +) + +var ( + defaultTimeout = 10 * time.Second + discardLogger = log.New(ioutil.Discard, "", 0) +) + +type fileCheckFunc func(path string) error diff --git a/vendor/github.com/hashicorp/hc-install/fs/fs_unix.go b/vendor/github.com/hashicorp/hc-install/fs/fs_unix.go new file mode 100644 index 000000000..95c5c11f1 --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/fs/fs_unix.go @@ -0,0 +1,45 @@ +//go:build !windows +// +build !windows + +package fs + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" +) + +func lookupDirs(extraDirs []string) []string { + pathVar := os.Getenv("PATH") + dirs := filepath.SplitList(pathVar) + for _, ep := range extraDirs { + dirs = append(dirs, ep) + } + return dirs +} + +func findFile(dirs []string, file string, f fileCheckFunc) (string, error) { + for _, dir := range dirs { + if dir == "" { + // Unix shell semantics: path element "" means "." + dir = "." 
+ } + path := filepath.Join(dir, file) + if err := f(path); err == nil { + return path, nil + } + } + return "", fmt.Errorf("%s: %w", file, exec.ErrNotFound) +} + +func checkExecutable(file string) error { + d, err := os.Stat(file) + if err != nil { + return err + } + if m := d.Mode(); !m.IsDir() && m&0111 != 0 { + return nil + } + return os.ErrPermission +} diff --git a/vendor/github.com/hashicorp/hc-install/fs/fs_windows.go b/vendor/github.com/hashicorp/hc-install/fs/fs_windows.go new file mode 100644 index 000000000..2a03c7ad2 --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/fs/fs_windows.go @@ -0,0 +1,81 @@ +package fs + +import ( + "fmt" + "io/fs" + "os" + "os/exec" + "path/filepath" + "strings" +) + +func lookupDirs(extraDirs []string) []string { + pathVar := os.Getenv("path") + dirs := filepath.SplitList(pathVar) + for _, ep := range extraDirs { + dirs = append(dirs, ep) + } + return dirs +} + +func findFile(dirs []string, file string, f fileCheckFunc) (string, error) { + for _, dir := range dirs { + path := filepath.Join(dir, file) + if err := f(path); err == nil { + return path, nil + } + } + return "", fmt.Errorf("%s: %w", file, exec.ErrNotFound) +} + +func checkExecutable(file string) error { + var exts []string + x := os.Getenv(`PATHEXT`) + if x != "" { + for _, e := range strings.Split(strings.ToLower(x), `;`) { + if e == "" { + continue + } + if e[0] != '.' { + e = "." 
+ e + } + exts = append(exts, e) + } + } else { + exts = []string{".com", ".exe", ".bat", ".cmd"} + } + + if len(exts) == 0 { + return chkStat(file) + } + if hasExt(file) { + if chkStat(file) == nil { + return nil + } + } + for _, e := range exts { + if f := file + e; chkStat(f) == nil { + return nil + } + } + return fs.ErrNotExist +} + +func chkStat(file string) error { + d, err := os.Stat(file) + if err != nil { + return err + } + if d.IsDir() { + return fs.ErrPermission + } + return nil +} + +func hasExt(file string) bool { + i := strings.LastIndex(file, ".") + if i < 0 { + return false + } + return strings.LastIndexAny(file, `:\/`) < i +} diff --git a/vendor/github.com/hashicorp/hc-install/installer.go b/vendor/github.com/hashicorp/hc-install/installer.go new file mode 100644 index 000000000..8b773c56d --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/installer.go @@ -0,0 +1,154 @@ +package install + +import ( + "context" + "fmt" + "io/ioutil" + "log" + + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/hc-install/errors" + "github.com/hashicorp/hc-install/src" +) + +type Installer struct { + logger *log.Logger + + removableSources []src.Removable +} + +type RemoveFunc func(ctx context.Context) error + +func NewInstaller() *Installer { + discardLogger := log.New(ioutil.Discard, "", 0) + return &Installer{ + logger: discardLogger, + } +} + +func (i *Installer) SetLogger(logger *log.Logger) { + i.logger = logger +} + +func (i *Installer) Ensure(ctx context.Context, sources []src.Source) (string, error) { + var errs *multierror.Error + + for _, source := range sources { + if srcWithLogger, ok := source.(src.LoggerSettable); ok { + srcWithLogger.SetLogger(i.logger) + } + + if srcValidatable, ok := source.(src.Validatable); ok { + err := srcValidatable.Validate() + if err != nil { + errs = multierror.Append(errs, err) + } + } + } + + if errs.ErrorOrNil() != nil { + return "", errs + } + + i.removableSources = make([]src.Removable, 0) + + for _, 
source := range sources { + if s, ok := source.(src.Removable); ok { + i.removableSources = append(i.removableSources, s) + } + + switch s := source.(type) { + case src.Findable: + execPath, err := s.Find(ctx) + if err != nil { + if errors.IsErrorSkippable(err) { + errs = multierror.Append(errs, err) + continue + } + return "", err + } + + return execPath, nil + case src.Installable: + execPath, err := s.Install(ctx) + if err != nil { + if errors.IsErrorSkippable(err) { + errs = multierror.Append(errs, err) + continue + } + return "", err + } + + return execPath, nil + case src.Buildable: + execPath, err := s.Build(ctx) + if err != nil { + if errors.IsErrorSkippable(err) { + errs = multierror.Append(errs, err) + continue + } + return "", err + } + + return execPath, nil + default: + return "", fmt.Errorf("unknown source: %T", s) + } + } + + return "", fmt.Errorf("unable to find, install, or build from %d sources: %s", + len(sources), errs.ErrorOrNil()) +} + +func (i *Installer) Install(ctx context.Context, sources []src.Installable) (string, error) { + var errs *multierror.Error + + i.removableSources = make([]src.Removable, 0) + + for _, source := range sources { + if srcWithLogger, ok := source.(src.LoggerSettable); ok { + srcWithLogger.SetLogger(i.logger) + } + + if srcValidatable, ok := source.(src.Validatable); ok { + err := srcValidatable.Validate() + if err != nil { + errs = multierror.Append(errs, err) + continue + } + } + + if s, ok := source.(src.Removable); ok { + i.removableSources = append(i.removableSources, s) + } + + execPath, err := source.Install(ctx) + if err != nil { + if errors.IsErrorSkippable(err) { + errs = multierror.Append(errs, err) + continue + } + return "", err + } + + return execPath, nil + } + + return "", fmt.Errorf("unable install from %d sources: %s", + len(sources), errs.ErrorOrNil()) +} + +func (i *Installer) Remove(ctx context.Context) error { + var errs *multierror.Error + + if i.removableSources != nil { + for _, rs := range 
i.removableSources { + err := rs.Remove(ctx) + if err != nil { + errs = multierror.Append(errs, err) + } + } + } + + return errs.ErrorOrNil() +} diff --git a/vendor/github.com/hashicorp/hc-install/internal/build/get_go_version.go b/vendor/github.com/hashicorp/hc-install/internal/build/get_go_version.go new file mode 100644 index 000000000..3a929859a --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/internal/build/get_go_version.go @@ -0,0 +1,37 @@ +package build + +import ( + "context" + "fmt" + "os/exec" + "regexp" + "strings" + + "github.com/hashicorp/go-version" +) + +// GetGoVersion obtains version of locally installed Go via "go version" +func GetGoVersion(ctx context.Context) (*version.Version, error) { + cmd := exec.CommandContext(ctx, "go", "version") + out, err := cmd.CombinedOutput() + if err != nil { + return nil, fmt.Errorf("unable to build: %w\n%s", err, out) + } + + output := strings.TrimSpace(string(out)) + + // e.g. "go version go1.15" + re := regexp.MustCompile(`^go version go([0-9.]+)\s+`) + matches := re.FindStringSubmatch(output) + if len(matches) != 2 { + return nil, fmt.Errorf("unexpected go version output: %q", output) + } + + rawGoVersion := matches[1] + v, err := version.NewVersion(rawGoVersion) + if err != nil { + return nil, fmt.Errorf("unexpected go version output: %w", err) + } + + return v, nil +} diff --git a/vendor/github.com/hashicorp/hc-install/internal/build/go_build.go b/vendor/github.com/hashicorp/hc-install/internal/build/go_build.go new file mode 100644 index 000000000..2f3f83254 --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/internal/build/go_build.go @@ -0,0 +1,123 @@ +package build + +import ( + "bytes" + "context" + "fmt" + "io/ioutil" + "log" + "os" + "os/exec" + "path/filepath" + + "github.com/hashicorp/go-version" +) + +var discardLogger = log.New(ioutil.Discard, "", 0) + +// GoBuild represents a Go builder (to run "go build") +type GoBuild struct { + Version *version.Version + DetectVendoring bool 
+ + pathToRemove string + logger *log.Logger +} + +func (gb *GoBuild) SetLogger(logger *log.Logger) { + gb.logger = logger +} + +func (gb *GoBuild) log() *log.Logger { + if gb.logger == nil { + return discardLogger + } + return gb.logger +} + +// Build runs "go build" within a given repo to produce binaryName in targetDir +func (gb *GoBuild) Build(ctx context.Context, repoDir, targetDir, binaryName string) (string, error) { + goCmd, cleanupFunc, err := gb.ensureRequiredGoVersion(ctx, repoDir) + if err != nil { + return "", err + } + defer cleanupFunc(ctx) + + goArgs := []string{"build", "-o", filepath.Join(targetDir, binaryName)} + + if gb.DetectVendoring { + vendorDir := filepath.Join(repoDir, "vendor") + if fi, err := os.Stat(vendorDir); err == nil && fi.IsDir() { + goArgs = append(goArgs, "-mod", "vendor") + } + } + + gb.log().Printf("executing %s %q in %q", goCmd, goArgs, repoDir) + cmd := exec.CommandContext(ctx, goCmd, goArgs...) + cmd.Dir = repoDir + out, err := cmd.CombinedOutput() + if err != nil { + return "", fmt.Errorf("unable to build: %w\n%s", err, out) + } + + binPath := filepath.Join(targetDir, binaryName) + + gb.pathToRemove = binPath + + return binPath, nil +} + +func (gb *GoBuild) Remove(ctx context.Context) error { + return os.RemoveAll(gb.pathToRemove) +} + +func (gb *GoBuild) ensureRequiredGoVersion(ctx context.Context, repoDir string) (string, CleanupFunc, error) { + cmdName := "go" + noopCleanupFunc := func(context.Context) {} + + if gb.Version != nil { + goVersion, err := GetGoVersion(ctx) + if err != nil { + return cmdName, noopCleanupFunc, err + } + + if !goVersion.GreaterThanOrEqual(gb.Version) { + // found incompatible version, try downloading the desired one + return gb.installGoVersion(ctx, gb.Version) + } + } + + if requiredVersion, ok := guessRequiredGoVersion(repoDir); ok { + goVersion, err := GetGoVersion(ctx) + if err != nil { + return cmdName, noopCleanupFunc, err + } + + if !goVersion.GreaterThanOrEqual(requiredVersion) { + // 
found incompatible version, try downloading the desired one + return gb.installGoVersion(ctx, requiredVersion) + } + } + + return cmdName, noopCleanupFunc, nil +} + +// CleanupFunc represents a function to be called once Go is no longer needed +// e.g. to remove any version installed temporarily per requirements +type CleanupFunc func(context.Context) + +func guessRequiredGoVersion(repoDir string) (*version.Version, bool) { + goEnvFile := filepath.Join(repoDir, ".go-version") + if fi, err := os.Stat(goEnvFile); err == nil && !fi.IsDir() { + b, err := ioutil.ReadFile(goEnvFile) + if err != nil { + return nil, false + } + requiredVersion, err := version.NewVersion(string(bytes.TrimSpace(b))) + if err != nil { + return nil, false + } + return requiredVersion, true + } + return nil, false +} diff --git a/vendor/github.com/hashicorp/hc-install/internal/build/go_is_installed.go b/vendor/github.com/hashicorp/hc-install/internal/build/go_is_installed.go new file mode 100644 index 000000000..6a81d196b --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/internal/build/go_is_installed.go @@ -0,0 +1,28 @@ +package build + +import ( + "context" + "fmt" + + "github.com/hashicorp/go-version" +) + +// GoIsInstalled represents a checker of whether Go is installed locally +type GoIsInstalled struct { + RequiredVersion version.Constraints +} + +// Check checks whether any Go version is installed locally +func (gii *GoIsInstalled) Check(ctx context.Context) error { + goVersion, err := GetGoVersion(ctx) + if err != nil { + return err + } + + if gii.RequiredVersion != nil && !gii.RequiredVersion.Check(goVersion) { + return fmt.Errorf("go %s required (%s available)", + gii.RequiredVersion, goVersion) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/hc-install/internal/build/install_go_version.go b/vendor/github.com/hashicorp/hc-install/internal/build/install_go_version.go new file mode 100644 index 000000000..f97c859dc --- /dev/null +++ 
b/vendor/github.com/hashicorp/hc-install/internal/build/install_go_version.go @@ -0,0 +1,53 @@ +package build + +import ( + "context" + "fmt" + "os" + "os/exec" + "strings" + + "github.com/hashicorp/go-version" +) + +// installGoVersion installs given version of Go using Go +// according to https://golang.org/doc/manage-install +func (gb *GoBuild) installGoVersion(ctx context.Context, v *version.Version) (string, CleanupFunc, error) { + // trim 0 patch versions as that's how Go does it :shrug: + shortVersion := strings.TrimSuffix(v.String(), ".0") + + pkgURL := fmt.Sprintf("golang.org/dl/go%s", shortVersion) + + gb.log().Printf("go getting %q", pkgURL) + cmd := exec.CommandContext(ctx, "go", "get", pkgURL) + out, err := cmd.CombinedOutput() + if err != nil { + return "", nil, fmt.Errorf("unable to install Go %s: %w\n%s", v, err, out) + } + + cmdName := fmt.Sprintf("go%s", shortVersion) + + gb.log().Printf("downloading go %q", shortVersion) + cmd = exec.CommandContext(ctx, cmdName, "download") + out, err = cmd.CombinedOutput() + if err != nil { + return "", nil, fmt.Errorf("unable to download Go %s: %w\n%s", v, err, out) + } + gb.log().Printf("download of go %q finished", shortVersion) + + cleanupFunc := func(ctx context.Context) { + cmd = exec.CommandContext(ctx, cmdName, "env", "GOROOT") + out, err = cmd.CombinedOutput() + if err != nil { + return + } + rootPath := strings.TrimSpace(string(out)) + + // run some extra checks before deleting, just to be sure + if rootPath != "" && strings.HasSuffix(rootPath, v.String()) { + os.RemoveAll(rootPath) + } + } + + return cmdName, cleanupFunc, nil +} diff --git a/vendor/github.com/hashicorp/hc-install/internal/httpclient/httpclient.go b/vendor/github.com/hashicorp/hc-install/internal/httpclient/httpclient.go new file mode 100644 index 000000000..159f70505 --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/internal/httpclient/httpclient.go @@ -0,0 +1,37 @@ +package httpclient + +import ( + "fmt" + "net/http" + + 
"github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/hc-install/internal/version" +) + +// NewHTTPClient provides a pre-configured http.Client +// e.g. with relevant User-Agent header +func NewHTTPClient() *http.Client { + client := cleanhttp.DefaultClient() + + userAgent := fmt.Sprintf("hc-install/%s", version.ModuleVersion()) + + cli := cleanhttp.DefaultPooledClient() + cli.Transport = &userAgentRoundTripper{ + userAgent: userAgent, + inner: cli.Transport, + } + + return client +} + +type userAgentRoundTripper struct { + inner http.RoundTripper + userAgent string +} + +func (rt *userAgentRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + if _, ok := req.Header["User-Agent"]; !ok { + req.Header.Set("User-Agent", rt.userAgent) + } + return rt.inner.RoundTrip(req) +} diff --git a/vendor/github.com/hashicorp/hc-install/internal/pubkey/pubkey.go b/vendor/github.com/hashicorp/hc-install/internal/pubkey/pubkey.go new file mode 100644 index 000000000..c36fba471 --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/internal/pubkey/pubkey.go @@ -0,0 +1,127 @@ +package pubkey + +const ( + // See https://www.hashicorp.com/security + DefaultPublicKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQINBGB9+xkBEACabYZOWKmgZsHTdRDiyPJxhbuUiKX65GUWkyRMJKi/1dviVxOX +PG6hBPtF48IFnVgxKpIb7G6NjBousAV+CuLlv5yqFKpOZEGC6sBV+Gx8Vu1CICpl +Zm+HpQPcIzwBpN+Ar4l/exCG/f/MZq/oxGgH+TyRF3XcYDjG8dbJCpHO5nQ5Cy9h +QIp3/Bh09kET6lk+4QlofNgHKVT2epV8iK1cXlbQe2tZtfCUtxk+pxvU0UHXp+AB +0xc3/gIhjZp/dePmCOyQyGPJbp5bpO4UeAJ6frqhexmNlaw9Z897ltZmRLGq1p4a +RnWL8FPkBz9SCSKXS8uNyV5oMNVn4G1obCkc106iWuKBTibffYQzq5TG8FYVJKrh +RwWB6piacEB8hl20IIWSxIM3J9tT7CPSnk5RYYCTRHgA5OOrqZhC7JefudrP8n+M +pxkDgNORDu7GCfAuisrf7dXYjLsxG4tu22DBJJC0c/IpRpXDnOuJN1Q5e/3VUKKW +mypNumuQpP5lc1ZFG64TRzb1HR6oIdHfbrVQfdiQXpvdcFx+Fl57WuUraXRV6qfb +4ZmKHX1JEwM/7tu21QE4F1dz0jroLSricZxfaCTHHWNfvGJoZ30/MZUrpSC0IfB3 +iQutxbZrwIlTBt+fGLtm3vDtwMFNWM+Rb1lrOxEQd2eijdxhvBOHtlIcswARAQAB 
+tERIYXNoaUNvcnAgU2VjdXJpdHkgKGhhc2hpY29ycC5jb20vc2VjdXJpdHkpIDxz +ZWN1cml0eUBoYXNoaWNvcnAuY29tPokCVAQTAQoAPhYhBMh0AR8KtAURDQIQVTQ2 +XZRy10aPBQJgffsZAhsDBQkJZgGABQsJCAcCBhUKCQgLAgQWAgMBAh4BAheAAAoJ +EDQ2XZRy10aPtpcP/0PhJKiHtC1zREpRTrjGizoyk4Sl2SXpBZYhkdrG++abo6zs +buaAG7kgWWChVXBo5E20L7dbstFK7OjVs7vAg/OLgO9dPD8n2M19rpqSbbvKYWvp +0NSgvFTT7lbyDhtPj0/bzpkZEhmvQaDWGBsbDdb2dBHGitCXhGMpdP0BuuPWEix+ +QnUMaPwU51q9GM2guL45Tgks9EKNnpDR6ZdCeWcqo1IDmklloidxT8aKL21UOb8t +cD+Bg8iPaAr73bW7Jh8TdcV6s6DBFub+xPJEB/0bVPmq3ZHs5B4NItroZ3r+h3ke +VDoSOSIZLl6JtVooOJ2la9ZuMqxchO3mrXLlXxVCo6cGcSuOmOdQSz4OhQE5zBxx +LuzA5ASIjASSeNZaRnffLIHmht17BPslgNPtm6ufyOk02P5XXwa69UCjA3RYrA2P +QNNC+OWZ8qQLnzGldqE4MnRNAxRxV6cFNzv14ooKf7+k686LdZrP/3fQu2p3k5rY +0xQUXKh1uwMUMtGR867ZBYaxYvwqDrg9XB7xi3N6aNyNQ+r7zI2lt65lzwG1v9hg +FG2AHrDlBkQi/t3wiTS3JOo/GCT8BjN0nJh0lGaRFtQv2cXOQGVRW8+V/9IpqEJ1 +qQreftdBFWxvH7VJq2mSOXUJyRsoUrjkUuIivaA9Ocdipk2CkP8bpuGz7ZF4uQIN +BGB9+xkBEACoklYsfvWRCjOwS8TOKBTfl8myuP9V9uBNbyHufzNETbhYeT33Cj0M +GCNd9GdoaknzBQLbQVSQogA+spqVvQPz1MND18GIdtmr0BXENiZE7SRvu76jNqLp +KxYALoK2Pc3yK0JGD30HcIIgx+lOofrVPA2dfVPTj1wXvm0rbSGA4Wd4Ng3d2AoR +G/wZDAQ7sdZi1A9hhfugTFZwfqR3XAYCk+PUeoFrkJ0O7wngaon+6x2GJVedVPOs +2x/XOR4l9ytFP3o+5ILhVnsK+ESVD9AQz2fhDEU6RhvzaqtHe+sQccR3oVLoGcat +ma5rbfzH0Fhj0JtkbP7WreQf9udYgXxVJKXLQFQgel34egEGG+NlbGSPG+qHOZtY +4uWdlDSvmo+1P95P4VG/EBteqyBbDDGDGiMs6lAMg2cULrwOsbxWjsWka8y2IN3z +1stlIJFvW2kggU+bKnQ+sNQnclq3wzCJjeDBfucR3a5WRojDtGoJP6Fc3luUtS7V +5TAdOx4dhaMFU9+01OoH8ZdTRiHZ1K7RFeAIslSyd4iA/xkhOhHq89F4ECQf3Bt4 +ZhGsXDTaA/VgHmf3AULbrC94O7HNqOvTWzwGiWHLfcxXQsr+ijIEQvh6rHKmJK8R +9NMHqc3L18eMO6bqrzEHW0Xoiu9W8Yj+WuB3IKdhclT3w0pO4Pj8gQARAQABiQI8 +BBgBCgAmFiEEyHQBHwq0BRENAhBVNDZdlHLXRo8FAmB9+xkCGwwFCQlmAYAACgkQ +NDZdlHLXRo9ZnA/7BmdpQLeTjEiXEJyW46efxlV1f6THn9U50GWcE9tebxCXgmQf +u+Uju4hreltx6GDi/zbVVV3HCa0yaJ4JVvA4LBULJVe3ym6tXXSYaOfMdkiK6P1v +JgfpBQ/b/mWB0yuWTUtWx18BQQwlNEQWcGe8n1lBbYsH9g7QkacRNb8tKUrUbWlQ +QsU8wuFgly22m+Va1nO2N5C/eE/ZEHyN15jEQ+QwgQgPrK2wThcOMyNMQX/VNEr1 
+Y3bI2wHfZFjotmek3d7ZfP2VjyDudnmCPQ5xjezWpKbN1kvjO3as2yhcVKfnvQI5 +P5Frj19NgMIGAp7X6pF5Csr4FX/Vw316+AFJd9Ibhfud79HAylvFydpcYbvZpScl +7zgtgaXMCVtthe3GsG4gO7IdxxEBZ/Fm4NLnmbzCIWOsPMx/FxH06a539xFq/1E2 +1nYFjiKg8a5JFmYU/4mV9MQs4bP/3ip9byi10V+fEIfp5cEEmfNeVeW5E7J8PqG9 +t4rLJ8FR4yJgQUa2gs2SNYsjWQuwS/MJvAv4fDKlkQjQmYRAOp1SszAnyaplvri4 +ncmfDsf0r65/sd6S40g5lHH8LIbGxcOIN6kwthSTPWX89r42CbY8GzjTkaeejNKx +v1aCrO58wAtursO1DiXCvBY7+NdafMRnoHwBk50iPqrVkNA8fv+auRyB2/G5Ag0E +YH3+JQEQALivllTjMolxUW2OxrXb+a2Pt6vjCBsiJzrUj0Pa63U+lT9jldbCCfgP +wDpcDuO1O05Q8k1MoYZ6HddjWnqKG7S3eqkV5c3ct3amAXp513QDKZUfIDylOmhU +qvxjEgvGjdRjz6kECFGYr6Vnj/p6AwWv4/FBRFlrq7cnQgPynbIH4hrWvewp3Tqw +GVgqm5RRofuAugi8iZQVlAiQZJo88yaztAQ/7VsXBiHTn61ugQ8bKdAsr8w/ZZU5 +HScHLqRolcYg0cKN91c0EbJq9k1LUC//CakPB9mhi5+aUVUGusIM8ECShUEgSTCi +KQiJUPZ2CFbbPE9L5o9xoPCxjXoX+r7L/WyoCPTeoS3YRUMEnWKvc42Yxz3meRb+ +BmaqgbheNmzOah5nMwPupJYmHrjWPkX7oyyHxLSFw4dtoP2j6Z7GdRXKa2dUYdk2 +x3JYKocrDoPHh3Q0TAZujtpdjFi1BS8pbxYFb3hHmGSdvz7T7KcqP7ChC7k2RAKO +GiG7QQe4NX3sSMgweYpl4OwvQOn73t5CVWYp/gIBNZGsU3Pto8g27vHeWyH9mKr4 +cSepDhw+/X8FGRNdxNfpLKm7Vc0Sm9Sof8TRFrBTqX+vIQupYHRi5QQCuYaV6OVr +ITeegNK3So4m39d6ajCR9QxRbmjnx9UcnSYYDmIB6fpBuwT0ogNtABEBAAGJBHIE +GAEKACYCGwIWIQTIdAEfCrQFEQ0CEFU0Nl2UctdGjwUCYH4bgAUJAeFQ2wJAwXQg +BBkBCgAdFiEEs2y6kaLAcwxDX8KAsLRBCXaFtnYFAmB9/iUACgkQsLRBCXaFtnYX +BhAAlxejyFXoQwyGo9U+2g9N6LUb/tNtH29RHYxy4A3/ZUY7d/FMkArmh4+dfjf0 +p9MJz98Zkps20kaYP+2YzYmaizO6OA6RIddcEXQDRCPHmLts3097mJ/skx9qLAf6 +rh9J7jWeSqWO6VW6Mlx8j9m7sm3Ae1OsjOx/m7lGZOhY4UYfY627+Jf7WQ5103Qs +lgQ09es/vhTCx0g34SYEmMW15Tc3eCjQ21b1MeJD/V26npeakV8iCZ1kHZHawPq/ +aCCuYEcCeQOOteTWvl7HXaHMhHIx7jjOd8XX9V+UxsGz2WCIxX/j7EEEc7CAxwAN +nWp9jXeLfxYfjrUB7XQZsGCd4EHHzUyCf7iRJL7OJ3tz5Z+rOlNjSgci+ycHEccL +YeFAEV+Fz+sj7q4cFAferkr7imY1XEI0Ji5P8p/uRYw/n8uUf7LrLw5TzHmZsTSC +UaiL4llRzkDC6cVhYfqQWUXDd/r385OkE4oalNNE+n+txNRx92rpvXWZ5qFYfv7E +95fltvpXc0iOugPMzyof3lwo3Xi4WZKc1CC/jEviKTQhfn3WZukuF5lbz3V1PQfI +xFsYe9WYQmp25XGgezjXzp89C/OIcYsVB1KJAKihgbYdHyUN4fRCmOszmOUwEAKR 
+3k5j4X8V5bk08sA69NVXPn2ofxyk3YYOMYWW8ouObnXoS8QJEDQ2XZRy10aPMpsQ +AIbwX21erVqUDMPn1uONP6o4NBEq4MwG7d+fT85rc1U0RfeKBwjucAE/iStZDQoM +ZKWvGhFR+uoyg1LrXNKuSPB82unh2bpvj4zEnJsJadiwtShTKDsikhrfFEK3aCK8 +Zuhpiu3jxMFDhpFzlxsSwaCcGJqcdwGhWUx0ZAVD2X71UCFoOXPjF9fNnpy80YNp +flPjj2RnOZbJyBIM0sWIVMd8F44qkTASf8K5Qb47WFN5tSpePq7OCm7s8u+lYZGK +wR18K7VliundR+5a8XAOyUXOL5UsDaQCK4Lj4lRaeFXunXl3DJ4E+7BKzZhReJL6 +EugV5eaGonA52TWtFdB8p+79wPUeI3KcdPmQ9Ll5Zi/jBemY4bzasmgKzNeMtwWP +fk6WgrvBwptqohw71HDymGxFUnUP7XYYjic2sVKhv9AevMGycVgwWBiWroDCQ9Ja +btKfxHhI2p+g+rcywmBobWJbZsujTNjhtme+kNn1mhJsD3bKPjKQfAxaTskBLb0V +wgV21891TS1Dq9kdPLwoS4XNpYg2LLB4p9hmeG3fu9+OmqwY5oKXsHiWc43dei9Y +yxZ1AAUOIaIdPkq+YG/PhlGE4YcQZ4RPpltAr0HfGgZhmXWigbGS+66pUj+Ojysc +j0K5tCVxVu0fhhFpOlHv0LWaxCbnkgkQH9jfMEJkAWMOuQINBGCAXCYBEADW6RNr +ZVGNXvHVBqSiOWaxl1XOiEoiHPt50Aijt25yXbG+0kHIFSoR+1g6Lh20JTCChgfQ +kGGjzQvEuG1HTw07YhsvLc0pkjNMfu6gJqFox/ogc53mz69OxXauzUQ/TZ27GDVp +UBu+EhDKt1s3OtA6Bjz/csop/Um7gT0+ivHyvJ/jGdnPEZv8tNuSE/Uo+hn/Q9hg +8SbveZzo3C+U4KcabCESEFl8Gq6aRi9vAfa65oxD5jKaIz7cy+pwb0lizqlW7H9t +Qlr3dBfdIcdzgR55hTFC5/XrcwJ6/nHVH/xGskEasnfCQX8RYKMuy0UADJy72TkZ +bYaCx+XXIcVB8GTOmJVoAhrTSSVLAZspfCnjwnSxisDn3ZzsYrq3cV6sU8b+QlIX +7VAjurE+5cZiVlaxgCjyhKqlGgmonnReWOBacCgL/UvuwMmMp5TTLmiLXLT7uxeG +ojEyoCk4sMrqrU1jevHyGlDJH9Taux15GILDwnYFfAvPF9WCid4UZ4Ouwjcaxfys +3LxNiZIlUsXNKwS3mhiMRL4TRsbs4k4QE+LIMOsauIvcvm8/frydvQ/kUwIhVTH8 +0XGOH909bYtJvY3fudK7ShIwm7ZFTduBJUG473E/Fn3VkhTmBX6+PjOC50HR/Hyb +waRCzfDruMe3TAcE/tSP5CUOb9C7+P+hPzQcDwARAQABiQRyBBgBCgAmFiEEyHQB +Hwq0BRENAhBVNDZdlHLXRo8FAmCAXCYCGwIFCQlmAYACQAkQNDZdlHLXRo/BdCAE +GQEKAB0WIQQ3TsdbSFkTYEqDHMfIIMbVzSerhwUCYIBcJgAKCRDIIMbVzSerh0Xw +D/9ghnUsoNCu1OulcoJdHboMazJvDt/znttdQSnULBVElgM5zk0Uyv87zFBzuCyQ +JWL3bWesQ2uFx5fRWEPDEfWVdDrjpQGb1OCCQyz1QlNPV/1M1/xhKGS9EeXrL8Dw +F6KTGkRwn1yXiP4BGgfeFIQHmJcKXEZ9HkrpNb8mcexkROv4aIPAwn+IaE+NHVtt +IBnufMXLyfpkWJQtJa9elh9PMLlHHnuvnYLvuAoOkhuvs7fXDMpfFZ01C+QSv1dz +Hm52GSStERQzZ51w4c0rYDneYDniC/sQT1x3dP5Xf6wzO+EhRMabkvoTbMqPsTEP 
+xyWr2pNtTBYp7pfQjsHxhJpQF0xjGN9C39z7f3gJG8IJhnPeulUqEZjhRFyVZQ6/ +siUeq7vu4+dM/JQL+i7KKe7Lp9UMrG6NLMH+ltaoD3+lVm8fdTUxS5MNPoA/I8cK +1OWTJHkrp7V/XaY7mUtvQn5V1yET5b4bogz4nME6WLiFMd+7x73gB+YJ6MGYNuO8 +e/NFK67MfHbk1/AiPTAJ6s5uHRQIkZcBPG7y5PpfcHpIlwPYCDGYlTajZXblyKrw +BttVnYKvKsnlysv11glSg0DphGxQJbXzWpvBNyhMNH5dffcfvd3eXJAxnD81GD2z +ZAriMJ4Av2TfeqQ2nxd2ddn0jX4WVHtAvLXfCgLM2Gveho4jD/9sZ6PZz/rEeTvt +h88t50qPcBa4bb25X0B5FO3TeK2LL3VKLuEp5lgdcHVonrcdqZFobN1CgGJua8TW +SprIkh+8ATZ/FXQTi01NzLhHXT1IQzSpFaZw0gb2f5ruXwvTPpfXzQrs2omY+7s7 +fkCwGPesvpSXPKn9v8uhUwD7NGW/Dm+jUM+QtC/FqzX7+/Q+OuEPjClUh1cqopCZ +EvAI3HjnavGrYuU6DgQdjyGT/UDbuwbCXqHxHojVVkISGzCTGpmBcQYQqhcFRedJ +yJlu6PSXlA7+8Ajh52oiMJ3ez4xSssFgUQAyOB16432tm4erpGmCyakkoRmMUn3p +wx+QIppxRlsHznhcCQKR3tcblUqH3vq5i4/ZAihusMCa0YrShtxfdSb13oKX+pFr +aZXvxyZlCa5qoQQBV1sowmPL1N2j3dR9TVpdTyCFQSv4KeiExmowtLIjeCppRBEK +eeYHJnlfkyKXPhxTVVO6H+dU4nVu0ASQZ07KiQjbI+zTpPKFLPp3/0sPRJM57r1+ +aTS71iR7nZNZ1f8LZV2OvGE6fJVtgJ1J4Nu02K54uuIhU3tg1+7Xt+IqwRc9rbVr +pHH/hFCYBPW2D2dxB+k2pQlg5NI+TpsXj5Zun8kRw5RtVb+dLuiH/xmxArIee8Jq +ZF5q4h4I33PSGDdSvGXn9UMY5Isjpg== +=7pIB +-----END PGP PUBLIC KEY BLOCK-----` +) diff --git a/vendor/github.com/hashicorp/hc-install/internal/releasesjson/checksum_downloader.go b/vendor/github.com/hashicorp/hc-install/internal/releasesjson/checksum_downloader.go new file mode 100644 index 000000000..2b85ea344 --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/internal/releasesjson/checksum_downloader.go @@ -0,0 +1,214 @@ +package releasesjson + +import ( + "bytes" + "crypto/sha256" + "encoding/hex" + "fmt" + "io" + "log" + "net/url" + "strings" + + "github.com/hashicorp/hc-install/internal/httpclient" + "golang.org/x/crypto/openpgp" +) + +type ChecksumDownloader struct { + ProductVersion *ProductVersion + Logger *log.Logger + ArmoredPublicKey string + + BaseURL string +} + +type ChecksumFileMap map[string]HashSum + +type HashSum []byte + +func (hs HashSum) Size() int { + return len(hs) +} + +func (hs HashSum) String() 
string { + return hex.EncodeToString(hs) +} + +func HashSumFromHexDigest(hexDigest string) (HashSum, error) { + sumBytes, err := hex.DecodeString(hexDigest) + if err != nil { + return nil, err + } + return HashSum(sumBytes), nil +} + +func (cd *ChecksumDownloader) DownloadAndVerifyChecksums() (ChecksumFileMap, error) { + sigFilename, err := cd.findSigFilename(cd.ProductVersion) + if err != nil { + return nil, err + } + + client := httpclient.NewHTTPClient() + sigURL := fmt.Sprintf("%s/%s/%s/%s", cd.BaseURL, + url.PathEscape(cd.ProductVersion.Name), + url.PathEscape(cd.ProductVersion.RawVersion), + url.PathEscape(sigFilename)) + cd.Logger.Printf("downloading signature from %s", sigURL) + sigResp, err := client.Get(sigURL) + if err != nil { + return nil, err + } + + if sigResp.StatusCode != 200 { + return nil, fmt.Errorf("failed to download signature from %q: %s", sigURL, sigResp.Status) + } + + defer sigResp.Body.Close() + + shasumsURL := fmt.Sprintf("%s/%s/%s/%s", cd.BaseURL, + url.PathEscape(cd.ProductVersion.Name), + url.PathEscape(cd.ProductVersion.RawVersion), + url.PathEscape(cd.ProductVersion.SHASUMS)) + cd.Logger.Printf("downloading checksums from %s", shasumsURL) + sumsResp, err := client.Get(shasumsURL) + if err != nil { + return nil, err + } + + if sumsResp.StatusCode != 200 { + return nil, fmt.Errorf("failed to download checksums from %q: %s", shasumsURL, sumsResp.Status) + } + + defer sumsResp.Body.Close() + + var shaSums strings.Builder + sumsReader := io.TeeReader(sumsResp.Body, &shaSums) + + err = cd.verifySumsSignature(sumsReader, sigResp.Body) + if err != nil { + return nil, err + } + + return fileMapFromChecksums(shaSums) +} + +func fileMapFromChecksums(checksums strings.Builder) (ChecksumFileMap, error) { + csMap := make(ChecksumFileMap, 0) + + lines := strings.Split(checksums.String(), "\n") + for _, line := range lines { + line = strings.TrimSpace(line) + if line == "" { + continue + } + parts := strings.Fields(line) + if len(parts) != 2 { + 
return nil, fmt.Errorf("unexpected checksum line format: %q", line) + } + + h, err := HashSumFromHexDigest(parts[0]) + if err != nil { + return nil, err + } + + if h.Size() != sha256.Size { + return nil, fmt.Errorf("unexpected sha256 format (len: %d, expected: %d)", + h.Size(), sha256.Size) + } + + csMap[parts[1]] = h + } + return csMap, nil +} + +func compareChecksum(logger *log.Logger, r io.Reader, verifiedHashSum HashSum, filename string, expectedSize int64) error { + h := sha256.New() + + // This may take a while depending on network connection as the io.Reader + // is expected to be http.Response.Body which streams the bytes + // on demand over the network. + logger.Printf("copying %q (%d bytes) to calculate checksum", filename, expectedSize) + bytesCopied, err := io.Copy(h, r) + if err != nil { + return err + } + logger.Printf("copied %d bytes of %q", bytesCopied, filename) + + if expectedSize != 0 && bytesCopied != int64(expectedSize) { + return fmt.Errorf("unexpected size (downloaded: %d, expected: %d)", + bytesCopied, expectedSize) + } + + calculatedSum := h.Sum(nil) + if !bytes.Equal(calculatedSum, verifiedHashSum) { + return fmt.Errorf("checksum mismatch (expected %q, calculated %q)", + verifiedHashSum, + hex.EncodeToString(calculatedSum)) + } + + logger.Printf("checksum matches: %q", hex.EncodeToString(calculatedSum)) + + return nil +} + +func (cd *ChecksumDownloader) verifySumsSignature(checksums, signature io.Reader) error { + el, err := cd.keyEntityList() + if err != nil { + return err + } + + _, err = openpgp.CheckDetachedSignature(el, checksums, signature) + if err != nil { + return fmt.Errorf("unable to verify checksums signature: %w", err) + } + + cd.Logger.Printf("checksum signature is valid") + + return nil +} + +func (cd *ChecksumDownloader) findSigFilename(pv *ProductVersion) (string, error) { + sigFiles := pv.SHASUMSSigs + if len(sigFiles) == 0 { + sigFiles = []string{pv.SHASUMSSig} + } + + keyIds, err := cd.pubKeyIds() + if err != nil { + 
return "", err + } + + for _, filename := range sigFiles { + for _, keyID := range keyIds { + if strings.HasSuffix(filename, fmt.Sprintf("_SHA256SUMS.%s.sig", keyID)) { + return filename, nil + } + } + if strings.HasSuffix(filename, "_SHA256SUMS.sig") { + return filename, nil + } + } + + return "", fmt.Errorf("no suitable sig file found") +} + +func (cd *ChecksumDownloader) pubKeyIds() ([]string, error) { + entityList, err := cd.keyEntityList() + if err != nil { + return nil, err + } + + fingerprints := make([]string, 0) + for _, entity := range entityList { + fingerprints = append(fingerprints, entity.PrimaryKey.KeyIdShortString()) + } + + return fingerprints, nil +} + +func (cd *ChecksumDownloader) keyEntityList() (openpgp.EntityList, error) { + if cd.ArmoredPublicKey == "" { + return nil, fmt.Errorf("no public key provided") + } + return openpgp.ReadArmoredKeyRing(strings.NewReader(cd.ArmoredPublicKey)) +} diff --git a/vendor/github.com/hashicorp/hc-install/internal/releasesjson/downloader.go b/vendor/github.com/hashicorp/hc-install/internal/releasesjson/downloader.go new file mode 100644 index 000000000..e9cd94e43 --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/internal/releasesjson/downloader.go @@ -0,0 +1,179 @@ +package releasesjson + +import ( + "archive/zip" + "bytes" + "context" + "fmt" + "io" + "io/ioutil" + "log" + "net/url" + "os" + "path/filepath" + "runtime" + + "github.com/hashicorp/hc-install/internal/httpclient" +) + +type Downloader struct { + Logger *log.Logger + VerifyChecksum bool + ArmoredPublicKey string + BaseURL string +} + +func (d *Downloader) DownloadAndUnpack(ctx context.Context, pv *ProductVersion, dstDir string) error { + if len(pv.Builds) == 0 { + return fmt.Errorf("no builds found for %s %s", pv.Name, pv.Version) + } + + pb, ok := pv.Builds.FilterBuild(runtime.GOOS, runtime.GOARCH, "zip") + if !ok { + return fmt.Errorf("no ZIP archive found for %s %s %s/%s", + pv.Name, pv.Version, runtime.GOOS, runtime.GOARCH) + } + + 
var verifiedChecksum HashSum + if d.VerifyChecksum { + v := &ChecksumDownloader{ + BaseURL: d.BaseURL, + ProductVersion: pv, + Logger: d.Logger, + ArmoredPublicKey: d.ArmoredPublicKey, + } + verifiedChecksums, err := v.DownloadAndVerifyChecksums() + if err != nil { + return err + } + var ok bool + verifiedChecksum, ok = verifiedChecksums[pb.Filename] + if !ok { + return fmt.Errorf("no checksum found for %q", pb.Filename) + } + } + + client := httpclient.NewHTTPClient() + + archiveURL := pb.URL + if d.BaseURL != "" { + // ensure that absolute download links from mocked responses + // are still pointing to the mock server if one is set + baseURL, err := url.Parse(d.BaseURL) + if err != nil { + return err + } + + u, err := url.Parse(archiveURL) + if err != nil { + return err + } + u.Scheme = baseURL.Scheme + u.Host = baseURL.Host + archiveURL = u.String() + } + + d.Logger.Printf("downloading archive from %s", archiveURL) + resp, err := client.Get(archiveURL) + if err != nil { + return err + } + + if resp.StatusCode != 200 { + return fmt.Errorf("failed to download ZIP archive from %q: %s", archiveURL, resp.Status) + } + + defer resp.Body.Close() + + var pkgReader io.Reader + pkgReader = resp.Body + + contentType := resp.Header.Get("content-type") + if !contentTypeIsZip(contentType) { + return fmt.Errorf("unexpected content-type: %s (expected any of %q)", + contentType, zipMimeTypes) + } + + expectedSize := resp.ContentLength + + if d.VerifyChecksum { + d.Logger.Printf("verifying checksum of %q", pb.Filename) + // provide extra reader to calculate & compare checksum + var buf bytes.Buffer + r := io.TeeReader(resp.Body, &buf) + pkgReader = &buf + + err := compareChecksum(d.Logger, r, verifiedChecksum, pb.Filename, expectedSize) + if err != nil { + return err + } + } + + pkgFile, err := ioutil.TempFile("", pb.Filename) + if err != nil { + return err + } + defer pkgFile.Close() + + d.Logger.Printf("copying %q (%d bytes) to %s", pb.Filename, expectedSize, pkgFile.Name()) + 
// Unless the bytes were already downloaded above for checksum verification + // this may take a while depending on network connection as the io.Reader + // is expected to be http.Response.Body which streams the bytes + // on demand over the network. + bytesCopied, err := io.Copy(pkgFile, pkgReader) + if err != nil { + return err + } + d.Logger.Printf("copied %d bytes to %s", bytesCopied, pkgFile.Name()) + + if expectedSize != 0 && bytesCopied != int64(expectedSize) { + return fmt.Errorf("unexpected size (downloaded: %d, expected: %d)", + bytesCopied, expectedSize) + } + + r, err := zip.OpenReader(pkgFile.Name()) + if err != nil { + return err + } + defer r.Close() + + for _, f := range r.File { + srcFile, err := f.Open() + if err != nil { + return err + } + + d.Logger.Printf("unpacking %s to %s", f.Name, dstDir) + dstPath := filepath.Join(dstDir, f.Name) + dstFile, err := os.Create(dstPath) + if err != nil { + return err + } + + _, err = io.Copy(dstFile, srcFile) + if err != nil { + return err + } + srcFile.Close() + dstFile.Close() + } + + return nil +} + +// The production release site uses consistent single mime type +// but mime types are platform-dependent +// and we may use different OS under test +var zipMimeTypes = []string{ + "application/x-zip-compressed", // Windows + "application/zip", // Unix +} + +func contentTypeIsZip(contentType string) bool { + for _, mt := range zipMimeTypes { + if mt == contentType { + return true + } + } + return false +} diff --git a/vendor/github.com/hashicorp/hc-install/internal/releasesjson/product_version.go b/vendor/github.com/hashicorp/hc-install/internal/releasesjson/product_version.go new file mode 100644 index 000000000..5eecb0136 --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/internal/releasesjson/product_version.go @@ -0,0 +1,41 @@ +package releasesjson + +import "github.com/hashicorp/go-version" + +// ProductVersion is a wrapper around a particular product version like +// "consul 0.5.1". 
A ProductVersion may have one or more builds. +type ProductVersion struct { + Name string `json:"name"` + RawVersion string `json:"version"` + Version *version.Version `json:"-"` + SHASUMS string `json:"shasums,omitempty"` + SHASUMSSig string `json:"shasums_signature,omitempty"` + SHASUMSSigs []string `json:"shasums_signatures,omitempty"` + Builds ProductBuilds `json:"builds"` +} + +type ProductVersionsMap map[string]*ProductVersion + +type ProductVersions []*ProductVersion + +func (pv ProductVersions) Len() int { + return len(pv) +} + +func (pv ProductVersions) Less(i, j int) bool { + return pv[i].Version.LessThan(pv[j].Version) +} + +func (pv ProductVersions) Swap(i, j int) { + pv[i], pv[j] = pv[j], pv[i] +} + +func (pvm ProductVersionsMap) AsSlice() ProductVersions { + versions := make(ProductVersions, 0) + + for _, pVersion := range pvm { + versions = append(versions, pVersion) + } + + return versions +} diff --git a/vendor/github.com/hashicorp/hc-install/internal/releasesjson/releases.go b/vendor/github.com/hashicorp/hc-install/internal/releasesjson/releases.go new file mode 100644 index 000000000..849f16a52 --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/internal/releasesjson/releases.go @@ -0,0 +1,177 @@ +package releasesjson + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "log" + "net/url" + "strings" + + "github.com/hashicorp/go-version" + "github.com/hashicorp/hc-install/internal/httpclient" +) + +const defaultBaseURL = "https://releases.hashicorp.com" + +// Product is a top-level product like "Consul" or "Nomad". A Product may have +// one or more versions. 
+type Product struct { + Name string `json:"name"` + Versions ProductVersionsMap `json:"versions"` +} + +type ProductBuilds []*ProductBuild + +func (pbs ProductBuilds) FilterBuild(os string, arch string, suffix string) (*ProductBuild, bool) { + for _, pb := range pbs { + if pb.OS == os && pb.Arch == arch && strings.HasSuffix(pb.Filename, suffix) { + return pb, true + } + } + return nil, false +} + +// ProductBuild is an OS/arch-specific representation of a product. This is the +// actual file that a user would download, like "consul_0.5.1_linux_amd64". +type ProductBuild struct { + Name string `json:"name"` + Version string `json:"version"` + OS string `json:"os"` + Arch string `json:"arch"` + Filename string `json:"filename"` + URL string `json:"url"` +} + +type Releases struct { + logger *log.Logger + BaseURL string +} + +func NewReleases() *Releases { + return &Releases{ + logger: log.New(ioutil.Discard, "", 0), + BaseURL: defaultBaseURL, + } +} + +func (r *Releases) SetLogger(logger *log.Logger) { + r.logger = logger +} + +func (r *Releases) ListProductVersions(ctx context.Context, productName string) (ProductVersionsMap, error) { + client := httpclient.NewHTTPClient() + + productIndexURL := fmt.Sprintf("%s/%s/index.json", + r.BaseURL, + url.PathEscape(productName)) + r.logger.Printf("requesting versions from %s", productIndexURL) + + resp, err := client.Get(productIndexURL) + if err != nil { + return nil, err + } + + if resp.StatusCode != 200 { + return nil, fmt.Errorf("failed to obtain product versions from %q: %s ", + productIndexURL, resp.Status) + } + + contentType := resp.Header.Get("content-type") + if contentType != "application/json" && contentType != "application/vnd+hashicorp.releases-api.v0+json" { + return nil, fmt.Errorf("unexpected Content-Type: %q", contentType) + } + + defer resp.Body.Close() + + r.logger.Printf("received %s", resp.Status) + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + p := Product{} + 
err = json.Unmarshal(body, &p) + if err != nil { + return nil, fmt.Errorf("%w: failed to unmarshal: %q", + err, string(body)) + } + + for rawVersion := range p.Versions { + v, err := version.NewVersion(rawVersion) + if err != nil { + // remove unparseable version + delete(p.Versions, rawVersion) + continue + } + + if ok, _ := versionIsSupported(v); !ok { + // Remove (currently unsupported) enterprise + // version and any other "custom" build + delete(p.Versions, rawVersion) + continue + } + + p.Versions[rawVersion].Version = v + } + + return p.Versions, nil +} + +func (r *Releases) GetProductVersion(ctx context.Context, product string, version *version.Version) (*ProductVersion, error) { + if ok, err := versionIsSupported(version); !ok { + return nil, fmt.Errorf("%s: %w", product, err) + } + + client := httpclient.NewHTTPClient() + + indexURL := fmt.Sprintf("%s/%s/%s/index.json", + r.BaseURL, + url.PathEscape(product), + url.PathEscape(version.String())) + r.logger.Printf("requesting version from %s", indexURL) + + resp, err := client.Get(indexURL) + if err != nil { + return nil, err + } + + if resp.StatusCode != 200 { + return nil, fmt.Errorf("failed to obtain product version from %q: %s ", + indexURL, resp.Status) + } + + contentType := resp.Header.Get("content-type") + if contentType != "application/json" && contentType != "application/vnd+hashicorp.releases-api.v0+json" { + return nil, fmt.Errorf("unexpected Content-Type: %q", contentType) + } + + defer resp.Body.Close() + + r.logger.Printf("received %s", resp.Status) + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + pv := &ProductVersion{} + err = json.Unmarshal(body, pv) + if err != nil { + return nil, fmt.Errorf("%w: failed to unmarshal response: %q", + err, string(body)) + } + + return pv, nil +} + +func versionIsSupported(v *version.Version) (bool, error) { + isSupported := v.Metadata() == "" + if !isSupported { + return false, fmt.Errorf("cannot obtain %s (enterprise 
versions are not supported)", + v.String()) + } + return true, nil +} diff --git a/vendor/github.com/hashicorp/hc-install/internal/src/src.go b/vendor/github.com/hashicorp/hc-install/internal/src/src.go new file mode 100644 index 000000000..5b53d92b1 --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/internal/src/src.go @@ -0,0 +1,3 @@ +package src + +type InstallSrcSigil struct{} diff --git a/vendor/github.com/hashicorp/hc-install/internal/validators/validators.go b/vendor/github.com/hashicorp/hc-install/internal/validators/validators.go new file mode 100644 index 000000000..5e3e6c816 --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/internal/validators/validators.go @@ -0,0 +1,18 @@ +package validators + +import "regexp" + +var ( + productNameRe = regexp.MustCompile(`^[a-z0-9-]+$`) + binaryNameRe = regexp.MustCompile(`^[a-zA-Z0-9-_.]+$`) +) + +// IsProductNameValid provides early user-facing validation of a product name +func IsProductNameValid(productName string) bool { + return productNameRe.MatchString(productName) +} + +// IsBinaryNameValid provides early user-facing validation of binary name +func IsBinaryNameValid(binaryName string) bool { + return binaryNameRe.MatchString(binaryName) +} diff --git a/vendor/github.com/hashicorp/hc-install/internal/version/version.go b/vendor/github.com/hashicorp/hc-install/internal/version/version.go new file mode 100644 index 000000000..d8bc462c5 --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/internal/version/version.go @@ -0,0 +1,9 @@ +package version + +const version = "0.1.0" + +// ModuleVersion returns the current version of the github.com/hashicorp/hc-install Go module. +// This is a function to allow for future possible enhancement using debug.BuildInfo. 
+func ModuleVersion() string { + return version +} diff --git a/vendor/github.com/hashicorp/hc-install/product/consul.go b/vendor/github.com/hashicorp/hc-install/product/consul.go new file mode 100644 index 000000000..aeeac9469 --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/product/consul.go @@ -0,0 +1,55 @@ +package product + +import ( + "context" + "fmt" + "os/exec" + "regexp" + "runtime" + "strings" + + "github.com/hashicorp/go-version" + "github.com/hashicorp/hc-install/internal/build" +) + +var consulVersionOutputRe = regexp.MustCompile(`Consul ` + simpleVersionRe) + +var ( + v1_18 = version.Must(version.NewVersion("1.18")) +) + +var Consul = Product{ + Name: "consul", + BinaryName: func() string { + if runtime.GOOS == "windows" { + return "consul.exe" + } + return "consul" + }, + GetVersion: func(ctx context.Context, path string) (*version.Version, error) { + cmd := exec.CommandContext(ctx, path, "version") + + out, err := cmd.Output() + if err != nil { + return nil, err + } + + stdout := strings.TrimSpace(string(out)) + + submatches := consulVersionOutputRe.FindStringSubmatch(stdout) + if len(submatches) != 2 { + return nil, fmt.Errorf("unexpected number of version matches %d for %s", len(submatches), stdout) + } + v, err := version.NewVersion(submatches[1]) + if err != nil { + return nil, fmt.Errorf("unable to parse version %q: %w", submatches[1], err) + } + + return v, err + }, + BuildInstructions: &BuildInstructions{ + GitRepoURL: "https://github.com/hashicorp/consul.git", + PreCloneCheck: &build.GoIsInstalled{}, + Build: &build.GoBuild{Version: v1_18}, + }, +} diff --git a/vendor/github.com/hashicorp/hc-install/product/product.go b/vendor/github.com/hashicorp/hc-install/product/product.go new file mode 100644 index 000000000..0b5e2a546 --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/product/product.go @@ -0,0 +1,60 @@ +package product + +import ( + "context" + "time" + + "github.com/hashicorp/go-version" +) + +type Product struct { 
+ // Name which identifies the product + // on releases.hashicorp.com and in Checkpoint + Name string + + // BinaryName represents name of the unpacked binary to be executed or built + BinaryName BinaryNameFunc + + // GetVersion represents how to obtain the version of the product + // reflecting any output or CLI flag differences + GetVersion func(ctx context.Context, execPath string) (*version.Version, error) + + // BuildInstructions represents how to build the product "from scratch" + BuildInstructions *BuildInstructions +} + +type BinaryNameFunc func() string + +type BuildInstructions struct { + GitRepoURL string + + // CloneTimeout overrides default timeout + // for cloning the repository + CloneTimeout time.Duration + + // PreCloneCheck represents any checks to run + // prior to building, such as verifying build + // dependencies (e.g. whether Go is installed) + PreCloneCheck Checker + + // PreCloneCheckTimeout overrides default timeout + // for the PreCloneCheck + PreCloneCheckTimeout time.Duration + + // Build represents how to build the product + // after checking out the source code + Build Builder + + // BuildTimeout overrides default timeout + // for the Builder + BuildTimeout time.Duration +} + +type Checker interface { + Check(ctx context.Context) error +} + +type Builder interface { + Build(ctx context.Context, repoDir, targetDir, binaryName string) (string, error) + Remove(ctx context.Context) error +} diff --git a/vendor/github.com/hashicorp/hc-install/product/terraform.go b/vendor/github.com/hashicorp/hc-install/product/terraform.go new file mode 100644 index 000000000..d820203a7 --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/product/terraform.go @@ -0,0 +1,55 @@ +package product + +import ( + "context" + "fmt" + "os/exec" + "regexp" + "runtime" + "strings" + + "github.com/hashicorp/go-version" + "github.com/hashicorp/hc-install/internal/build" +) + +var ( + simpleVersionRe = `v?(?P[0-9]+(?:\.[0-9]+)*(?:-[A-Za-z0-9\.]+)?)` + + 
terraformVersionOutputRe = regexp.MustCompile(`Terraform ` + simpleVersionRe) +) + +var Terraform = Product{ + Name: "terraform", + BinaryName: func() string { + if runtime.GOOS == "windows" { + return "terraform.exe" + } + return "terraform" + }, + GetVersion: func(ctx context.Context, path string) (*version.Version, error) { + cmd := exec.CommandContext(ctx, path, "version") + + out, err := cmd.Output() + if err != nil { + return nil, err + } + + stdout := strings.TrimSpace(string(out)) + + submatches := terraformVersionOutputRe.FindStringSubmatch(stdout) + if len(submatches) != 2 { + return nil, fmt.Errorf("unexpected number of version matches %d for %s", len(submatches), stdout) + } + v, err := version.NewVersion(submatches[1]) + if err != nil { + return nil, fmt.Errorf("unable to parse version %q: %w", submatches[1], err) + } + + return v, err + }, + BuildInstructions: &BuildInstructions{ + GitRepoURL: "https://github.com/hashicorp/terraform.git", + PreCloneCheck: &build.GoIsInstalled{}, + Build: &build.GoBuild{DetectVendoring: true}, + }, +} diff --git a/vendor/github.com/hashicorp/hc-install/releases/exact_version.go b/vendor/github.com/hashicorp/hc-install/releases/exact_version.go new file mode 100644 index 000000000..7fe2cb56e --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/releases/exact_version.go @@ -0,0 +1,147 @@ +package releases + +import ( + "context" + "fmt" + "io/ioutil" + "log" + "os" + "path/filepath" + "time" + + "github.com/hashicorp/go-version" + "github.com/hashicorp/hc-install/internal/pubkey" + rjson "github.com/hashicorp/hc-install/internal/releasesjson" + isrc "github.com/hashicorp/hc-install/internal/src" + "github.com/hashicorp/hc-install/internal/validators" + "github.com/hashicorp/hc-install/product" +) + +// ExactVersion installs the given Version of product +// to OS temp directory, or to InstallDir (if not empty) +type ExactVersion struct { + Product product.Product + Version *version.Version + InstallDir string + 
Timeout time.Duration + + SkipChecksumVerification bool + + // ArmoredPublicKey is a public PGP key in ASCII/armor format to use + // instead of built-in pubkey to verify signature of downloaded checksums + ArmoredPublicKey string + + apiBaseURL string + logger *log.Logger + pathsToRemove []string +} + +func (*ExactVersion) IsSourceImpl() isrc.InstallSrcSigil { + return isrc.InstallSrcSigil{} +} + +func (ev *ExactVersion) SetLogger(logger *log.Logger) { + ev.logger = logger +} + +func (ev *ExactVersion) log() *log.Logger { + if ev.logger == nil { + return discardLogger + } + return ev.logger +} + +func (ev *ExactVersion) Validate() error { + if !validators.IsProductNameValid(ev.Product.Name) { + return fmt.Errorf("invalid product name: %q", ev.Product.Name) + } + + if !validators.IsBinaryNameValid(ev.Product.BinaryName()) { + return fmt.Errorf("invalid binary name: %q", ev.Product.BinaryName()) + } + + if ev.Version == nil { + return fmt.Errorf("unknown version") + } + + return nil +} + +func (ev *ExactVersion) Install(ctx context.Context) (string, error) { + timeout := defaultInstallTimeout + if ev.Timeout > 0 { + timeout = ev.Timeout + } + ctx, cancelFunc := context.WithTimeout(ctx, timeout) + defer cancelFunc() + + if ev.pathsToRemove == nil { + ev.pathsToRemove = make([]string, 0) + } + + dstDir := ev.InstallDir + if dstDir == "" { + var err error + dirName := fmt.Sprintf("%s_*", ev.Product.Name) + dstDir, err = ioutil.TempDir("", dirName) + if err != nil { + return "", err + } + ev.pathsToRemove = append(ev.pathsToRemove, dstDir) + ev.log().Printf("created new temp dir at %s", dstDir) + } + ev.log().Printf("will install into dir at %s", dstDir) + + rels := rjson.NewReleases() + if ev.apiBaseURL != "" { + rels.BaseURL = ev.apiBaseURL + } + rels.SetLogger(ev.log()) + pv, err := rels.GetProductVersion(ctx, ev.Product.Name, ev.Version) + if err != nil { + return "", err + } + + d := &rjson.Downloader{ + Logger: ev.log(), + VerifyChecksum: 
!ev.SkipChecksumVerification, + ArmoredPublicKey: pubkey.DefaultPublicKey, + BaseURL: rels.BaseURL, + } + if ev.ArmoredPublicKey != "" { + d.ArmoredPublicKey = ev.ArmoredPublicKey + } + if ev.apiBaseURL != "" { + d.BaseURL = ev.apiBaseURL + } + + err = d.DownloadAndUnpack(ctx, pv, dstDir) + if err != nil { + return "", err + } + + execPath := filepath.Join(dstDir, ev.Product.BinaryName()) + + ev.pathsToRemove = append(ev.pathsToRemove, execPath) + + ev.log().Printf("changing perms of %s", execPath) + err = os.Chmod(execPath, 0o700) + if err != nil { + return "", err + } + + return execPath, nil +} + +func (ev *ExactVersion) Remove(ctx context.Context) error { + if ev.pathsToRemove != nil { + for _, path := range ev.pathsToRemove { + err := os.RemoveAll(path) + if err != nil { + return err + } + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/hc-install/releases/latest_version.go b/vendor/github.com/hashicorp/hc-install/releases/latest_version.go new file mode 100644 index 000000000..c5c1807a8 --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/releases/latest_version.go @@ -0,0 +1,171 @@ +package releases + +import ( + "context" + "fmt" + "io/ioutil" + "log" + "os" + "path/filepath" + "sort" + "time" + + "github.com/hashicorp/go-version" + "github.com/hashicorp/hc-install/internal/pubkey" + rjson "github.com/hashicorp/hc-install/internal/releasesjson" + isrc "github.com/hashicorp/hc-install/internal/src" + "github.com/hashicorp/hc-install/internal/validators" + "github.com/hashicorp/hc-install/product" +) + +type LatestVersion struct { + Product product.Product + Constraints version.Constraints + InstallDir string + Timeout time.Duration + IncludePrereleases bool + + SkipChecksumVerification bool + + // ArmoredPublicKey is a public PGP key in ASCII/armor format to use + // instead of built-in pubkey to verify signature of downloaded checksums + ArmoredPublicKey string + + apiBaseURL string + logger *log.Logger + pathsToRemove []string +} + 
+func (*LatestVersion) IsSourceImpl() isrc.InstallSrcSigil { + return isrc.InstallSrcSigil{} +} + +func (lv *LatestVersion) SetLogger(logger *log.Logger) { + lv.logger = logger +} + +func (lv *LatestVersion) log() *log.Logger { + if lv.logger == nil { + return discardLogger + } + return lv.logger +} + +func (lv *LatestVersion) Validate() error { + if !validators.IsProductNameValid(lv.Product.Name) { + return fmt.Errorf("invalid product name: %q", lv.Product.Name) + } + + if !validators.IsBinaryNameValid(lv.Product.BinaryName()) { + return fmt.Errorf("invalid binary name: %q", lv.Product.BinaryName()) + } + + return nil +} + +func (lv *LatestVersion) Install(ctx context.Context) (string, error) { + timeout := defaultInstallTimeout + if lv.Timeout > 0 { + timeout = lv.Timeout + } + ctx, cancelFunc := context.WithTimeout(ctx, timeout) + defer cancelFunc() + + if lv.pathsToRemove == nil { + lv.pathsToRemove = make([]string, 0) + } + + dstDir := lv.InstallDir + if dstDir == "" { + var err error + dirName := fmt.Sprintf("%s_*", lv.Product.Name) + dstDir, err = ioutil.TempDir("", dirName) + if err != nil { + return "", err + } + lv.pathsToRemove = append(lv.pathsToRemove, dstDir) + lv.log().Printf("created new temp dir at %s", dstDir) + } + lv.log().Printf("will install into dir at %s", dstDir) + + rels := rjson.NewReleases() + if lv.apiBaseURL != "" { + rels.BaseURL = lv.apiBaseURL + } + rels.SetLogger(lv.log()) + versions, err := rels.ListProductVersions(ctx, lv.Product.Name) + if err != nil { + return "", err + } + + if len(versions) == 0 { + return "", fmt.Errorf("no versions found for %q", lv.Product.Name) + } + + versionToInstall, ok := lv.findLatestMatchingVersion(versions, lv.Constraints) + if !ok { + return "", fmt.Errorf("no matching version found for %q", lv.Constraints) + } + + d := &rjson.Downloader{ + Logger: lv.log(), + VerifyChecksum: !lv.SkipChecksumVerification, + ArmoredPublicKey: pubkey.DefaultPublicKey, + BaseURL: rels.BaseURL, + } + if 
lv.ArmoredPublicKey != "" { + d.ArmoredPublicKey = lv.ArmoredPublicKey + } + if lv.apiBaseURL != "" { + d.BaseURL = lv.apiBaseURL + } + err = d.DownloadAndUnpack(ctx, versionToInstall, dstDir) + if err != nil { + return "", err + } + + execPath := filepath.Join(dstDir, lv.Product.BinaryName()) + + lv.pathsToRemove = append(lv.pathsToRemove, execPath) + + lv.log().Printf("changing perms of %s", execPath) + err = os.Chmod(execPath, 0o700) + if err != nil { + return "", err + } + + return execPath, nil +} + +func (lv *LatestVersion) Remove(ctx context.Context) error { + if lv.pathsToRemove != nil { + for _, path := range lv.pathsToRemove { + err := os.RemoveAll(path) + if err != nil { + return err + } + } + } + return nil +} + +func (lv *LatestVersion) findLatestMatchingVersion(pvs rjson.ProductVersionsMap, vc version.Constraints) (*rjson.ProductVersion, bool) { + versions := make(version.Collection, 0) + for _, pv := range pvs.AsSlice() { + if !lv.IncludePrereleases && pv.Version.Prerelease() != "" { + // skip prereleases if desired + continue + } + + versions = append(versions, pv.Version) + } + + if len(versions) == 0 { + return nil, false + } + + sort.Stable(versions) + latestVersion := versions[len(versions)-1] + + return pvs[latestVersion.Original()], true +} diff --git a/vendor/github.com/hashicorp/hc-install/releases/releases.go b/vendor/github.com/hashicorp/hc-install/releases/releases.go new file mode 100644 index 000000000..2c3f30992 --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/releases/releases.go @@ -0,0 +1,13 @@ +package releases + +import ( + "io/ioutil" + "log" + "time" +) + +var ( + defaultInstallTimeout = 30 * time.Second + defaultListTimeout = 10 * time.Second + discardLogger = log.New(ioutil.Discard, "", 0) +) diff --git a/vendor/github.com/hashicorp/hc-install/releases/versions.go b/vendor/github.com/hashicorp/hc-install/releases/versions.go new file mode 100644 index 000000000..bf0f799fa --- /dev/null +++ 
b/vendor/github.com/hashicorp/hc-install/releases/versions.go @@ -0,0 +1,82 @@ +package releases + +import ( + "context" + "fmt" + "sort" + "time" + + "github.com/hashicorp/go-version" + rjson "github.com/hashicorp/hc-install/internal/releasesjson" + "github.com/hashicorp/hc-install/internal/validators" + "github.com/hashicorp/hc-install/product" + "github.com/hashicorp/hc-install/src" +) + +// Versions allows listing all versions of a product +// which match Constraints +type Versions struct { + Product product.Product + Constraints version.Constraints + + ListTimeout time.Duration + + // Install represents configuration for installation of any listed version + Install InstallationOptions +} + +type InstallationOptions struct { + Timeout time.Duration + Dir string + + SkipChecksumVerification bool + + // ArmoredPublicKey is a public PGP key in ASCII/armor format to use + // instead of built-in pubkey to verify signature of downloaded checksums + // during installation + ArmoredPublicKey string +} + +func (v *Versions) List(ctx context.Context) ([]src.Source, error) { + if !validators.IsProductNameValid(v.Product.Name) { + return nil, fmt.Errorf("invalid product name: %q", v.Product.Name) + } + + timeout := defaultListTimeout + if v.ListTimeout > 0 { + timeout = v.ListTimeout + } + ctx, cancelFunc := context.WithTimeout(ctx, timeout) + defer cancelFunc() + + r := rjson.NewReleases() + pvs, err := r.ListProductVersions(ctx, v.Product.Name) + if err != nil { + return nil, err + } + + versions := pvs.AsSlice() + sort.Stable(versions) + + installables := make([]src.Source, 0) + for _, pv := range versions { + if !v.Constraints.Check(pv.Version) { + // skip version which doesn't match constraint + continue + } + + ev := &ExactVersion{ + Product: v.Product, + Version: pv.Version, + InstallDir: v.Install.Dir, + Timeout: v.Install.Timeout, + + ArmoredPublicKey: v.Install.ArmoredPublicKey, + SkipChecksumVerification: v.Install.SkipChecksumVerification, + } + + installables 
= append(installables, ev) + } + + return installables, nil +} diff --git a/vendor/github.com/hashicorp/hc-install/src/src.go b/vendor/github.com/hashicorp/hc-install/src/src.go new file mode 100644 index 000000000..11fef7869 --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/src/src.go @@ -0,0 +1,42 @@ +package src + +import ( + "context" + "log" + + isrc "github.com/hashicorp/hc-install/internal/src" +) + +// Source represents an installer, finder, or builder +type Source interface { + IsSourceImpl() isrc.InstallSrcSigil +} + +type Installable interface { + Source + Install(ctx context.Context) (string, error) +} + +type Findable interface { + Source + Find(ctx context.Context) (string, error) +} + +type Buildable interface { + Source + Build(ctx context.Context) (string, error) +} + +type Validatable interface { + Source + Validate() error +} + +type Removable interface { + Source + Remove(ctx context.Context) error +} + +type LoggerSettable interface { + SetLogger(logger *log.Logger) +} diff --git a/vendor/github.com/hashicorp/terraform-exec/LICENSE b/vendor/github.com/hashicorp/terraform-exec/LICENSE new file mode 100644 index 000000000..a612ad981 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. 
"Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. 
"Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. 
+Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. 
Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. 
However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. 
Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. 
* +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. 
Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/terraform-exec/internal/version/version.go b/vendor/github.com/hashicorp/terraform-exec/internal/version/version.go new file mode 100644 index 000000000..2b19c59ea --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/internal/version/version.go @@ -0,0 +1,9 @@ +package version + +const version = "0.16.1" + +// ModuleVersion returns the current version of the github.com/hashicorp/terraform-exec Go module. +// This is a function to allow for future possible enhancement using debug.BuildInfo. +func ModuleVersion() string { + return version +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/apply.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/apply.go new file mode 100644 index 000000000..40d9e69b9 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/apply.go @@ -0,0 +1,169 @@ +package tfexec + +import ( + "context" + "fmt" + "os/exec" + "strconv" +) + +type applyConfig struct { + backup string + dirOrPlan string + lock bool + + // LockTimeout must be a string with time unit, e.g. '10s' + lockTimeout string + parallelism int + reattachInfo ReattachInfo + refresh bool + replaceAddrs []string + state string + stateOut string + targets []string + + // Vars: each var must be supplied as a single string, e.g. 'foo=bar' + vars []string + varFiles []string +} + +var defaultApplyOptions = applyConfig{ + lock: true, + parallelism: 10, + refresh: true, +} + +// ApplyOption represents options used in the Apply method. 
+type ApplyOption interface { + configureApply(*applyConfig) +} + +func (opt *ParallelismOption) configureApply(conf *applyConfig) { + conf.parallelism = opt.parallelism +} + +func (opt *BackupOption) configureApply(conf *applyConfig) { + conf.backup = opt.path +} + +func (opt *TargetOption) configureApply(conf *applyConfig) { + conf.targets = append(conf.targets, opt.target) +} + +func (opt *LockTimeoutOption) configureApply(conf *applyConfig) { + conf.lockTimeout = opt.timeout +} + +func (opt *StateOption) configureApply(conf *applyConfig) { + conf.state = opt.path +} + +func (opt *StateOutOption) configureApply(conf *applyConfig) { + conf.stateOut = opt.path +} + +func (opt *VarFileOption) configureApply(conf *applyConfig) { + conf.varFiles = append(conf.varFiles, opt.path) +} + +func (opt *LockOption) configureApply(conf *applyConfig) { + conf.lock = opt.lock +} + +func (opt *RefreshOption) configureApply(conf *applyConfig) { + conf.refresh = opt.refresh +} + +func (opt *ReplaceOption) configureApply(conf *applyConfig) { + conf.replaceAddrs = append(conf.replaceAddrs, opt.address) +} + +func (opt *VarOption) configureApply(conf *applyConfig) { + conf.vars = append(conf.vars, opt.assignment) +} + +func (opt *DirOrPlanOption) configureApply(conf *applyConfig) { + conf.dirOrPlan = opt.path +} + +func (opt *ReattachOption) configureApply(conf *applyConfig) { + conf.reattachInfo = opt.info +} + +// Apply represents the terraform apply subcommand. +func (tf *Terraform) Apply(ctx context.Context, opts ...ApplyOption) error { + cmd, err := tf.applyCmd(ctx, opts...) 
+ if err != nil { + return err + } + return tf.runTerraformCmd(ctx, cmd) +} + +func (tf *Terraform) applyCmd(ctx context.Context, opts ...ApplyOption) (*exec.Cmd, error) { + c := defaultApplyOptions + + for _, o := range opts { + o.configureApply(&c) + } + + args := []string{"apply", "-no-color", "-auto-approve", "-input=false"} + + // string opts: only pass if set + if c.backup != "" { + args = append(args, "-backup="+c.backup) + } + if c.lockTimeout != "" { + args = append(args, "-lock-timeout="+c.lockTimeout) + } + if c.state != "" { + args = append(args, "-state="+c.state) + } + if c.stateOut != "" { + args = append(args, "-state-out="+c.stateOut) + } + for _, vf := range c.varFiles { + args = append(args, "-var-file="+vf) + } + + // boolean and numerical opts: always pass + args = append(args, "-lock="+strconv.FormatBool(c.lock)) + args = append(args, "-parallelism="+fmt.Sprint(c.parallelism)) + args = append(args, "-refresh="+strconv.FormatBool(c.refresh)) + + // string slice opts: split into separate args + if c.replaceAddrs != nil { + err := tf.compatible(ctx, tf0_15_2, nil) + if err != nil { + return nil, fmt.Errorf("replace option was introduced in Terraform 0.15.2: %w", err) + } + for _, addr := range c.replaceAddrs { + args = append(args, "-replace="+addr) + } + } + if c.targets != nil { + for _, ta := range c.targets { + args = append(args, "-target="+ta) + } + } + if c.vars != nil { + for _, v := range c.vars { + args = append(args, "-var", v) + } + } + + // string argument: pass if set + if c.dirOrPlan != "" { + args = append(args, c.dirOrPlan) + } + + mergeEnv := map[string]string{} + if c.reattachInfo != nil { + reattachStr, err := c.reattachInfo.marshalString() + if err != nil { + return nil, err + } + mergeEnv[reattachEnvVar] = reattachStr + } + + return tf.buildTerraformCmd(ctx, mergeEnv, args...), nil +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd.go new file mode 
100644 index 000000000..83abd22d5 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd.go @@ -0,0 +1,232 @@ +package tfexec + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "strings" + + "github.com/hashicorp/terraform-exec/internal/version" +) + +const ( + checkpointDisableEnvVar = "CHECKPOINT_DISABLE" + cliArgsEnvVar = "TF_CLI_ARGS" + logEnvVar = "TF_LOG" + inputEnvVar = "TF_INPUT" + automationEnvVar = "TF_IN_AUTOMATION" + logPathEnvVar = "TF_LOG_PATH" + reattachEnvVar = "TF_REATTACH_PROVIDERS" + appendUserAgentEnvVar = "TF_APPEND_USER_AGENT" + workspaceEnvVar = "TF_WORKSPACE" + disablePluginTLSEnvVar = "TF_DISABLE_PLUGIN_TLS" + skipProviderVerifyEnvVar = "TF_SKIP_PROVIDER_VERIFY" + + varEnvVarPrefix = "TF_VAR_" + cliArgEnvVarPrefix = "TF_CLI_ARGS_" +) + +var prohibitedEnvVars = []string{ + cliArgsEnvVar, + inputEnvVar, + automationEnvVar, + logPathEnvVar, + logEnvVar, + reattachEnvVar, + appendUserAgentEnvVar, + workspaceEnvVar, + disablePluginTLSEnvVar, + skipProviderVerifyEnvVar, +} + +var prohibitedEnvVarPrefixes = []string{ + varEnvVarPrefix, + cliArgEnvVarPrefix, +} + +func manualEnvVars(env map[string]string, cb func(k string)) { + for k := range env { + for _, p := range prohibitedEnvVars { + if p == k { + cb(k) + goto NextEnvVar + } + } + for _, prefix := range prohibitedEnvVarPrefixes { + if strings.HasPrefix(k, prefix) { + cb(k) + goto NextEnvVar + } + } + NextEnvVar: + } +} + +// ProhibitedEnv returns a slice of environment variable keys that are not allowed +// to be set manually from the passed environment. +func ProhibitedEnv(env map[string]string) []string { + var p []string + manualEnvVars(env, func(k string) { + p = append(p, k) + }) + return p +} + +// CleanEnv removes any prohibited environment variables from an environment map. 
+func CleanEnv(dirty map[string]string) map[string]string { + clean := dirty + manualEnvVars(clean, func(k string) { + delete(clean, k) + }) + return clean +} + +func envMap(environ []string) map[string]string { + env := map[string]string{} + for _, ev := range environ { + parts := strings.SplitN(ev, "=", 2) + if len(parts) == 0 { + continue + } + k := parts[0] + v := "" + if len(parts) == 2 { + v = parts[1] + } + env[k] = v + } + return env +} + +func envSlice(environ map[string]string) []string { + env := []string{} + for k, v := range environ { + env = append(env, k+"="+v) + } + return env +} + +func (tf *Terraform) buildEnv(mergeEnv map[string]string) []string { + // set Terraform level env, if env is nil, fall back to os.Environ + var env map[string]string + if tf.env == nil { + env = envMap(os.Environ()) + } else { + env = make(map[string]string, len(tf.env)) + for k, v := range tf.env { + env[k] = v + } + } + + // override env with any command specific environment + for k, v := range mergeEnv { + env[k] = v + } + + // always propagate CHECKPOINT_DISABLE env var unless it is + // explicitly overridden with tf.SetEnv or command env + if _, ok := env[checkpointDisableEnvVar]; !ok { + env[checkpointDisableEnvVar] = os.Getenv(checkpointDisableEnvVar) + } + + // always override user agent + ua := mergeUserAgent( + os.Getenv(appendUserAgentEnvVar), + tf.appendUserAgent, + fmt.Sprintf("HashiCorp-terraform-exec/%s", version.ModuleVersion()), + ) + env[appendUserAgentEnvVar] = ua + + // always override logging + if tf.logPath == "" { + // so logging can't pollute our stderr output + env[logEnvVar] = "" + env[logPathEnvVar] = "" + } else { + env[logPathEnvVar] = tf.logPath + // Log levels other than TRACE are currently unreliable, the CLI recommends using TRACE only. 
+ env[logEnvVar] = "TRACE" + } + + // constant automation override env vars + env[automationEnvVar] = "1" + + // force usage of workspace methods for switching + env[workspaceEnvVar] = "" + + if tf.disablePluginTLS { + env[disablePluginTLSEnvVar] = "1" + } + + if tf.skipProviderVerify { + env[skipProviderVerifyEnvVar] = "1" + } + + return envSlice(env) +} + +func (tf *Terraform) buildTerraformCmd(ctx context.Context, mergeEnv map[string]string, args ...string) *exec.Cmd { + cmd := exec.Command(tf.execPath, args...) + + cmd.Env = tf.buildEnv(mergeEnv) + cmd.Dir = tf.workingDir + + tf.logger.Printf("[INFO] running Terraform command: %s", cmd.String()) + + return cmd +} + +func (tf *Terraform) runTerraformCmdJSON(ctx context.Context, cmd *exec.Cmd, v interface{}) error { + var outbuf = bytes.Buffer{} + cmd.Stdout = mergeWriters(cmd.Stdout, &outbuf) + + err := tf.runTerraformCmd(ctx, cmd) + if err != nil { + return err + } + + dec := json.NewDecoder(&outbuf) + dec.UseNumber() + return dec.Decode(v) +} + +// mergeUserAgent does some minor deduplication to ensure we aren't +// just using the same append string over and over. +func mergeUserAgent(uas ...string) string { + included := map[string]bool{} + merged := []string{} + for _, ua := range uas { + ua = strings.TrimSpace(ua) + + if ua == "" { + continue + } + if included[ua] { + continue + } + included[ua] = true + merged = append(merged, ua) + } + return strings.Join(merged, " ") +} + +func mergeWriters(writers ...io.Writer) io.Writer { + compact := []io.Writer{} + for _, w := range writers { + if w != nil { + compact = append(compact, w) + } + } + if len(compact) == 0 { + return ioutil.Discard + } + if len(compact) == 1 { + return compact[0] + } + return io.MultiWriter(compact...) 
+} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd_default.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd_default.go new file mode 100644 index 000000000..08a65bcde --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd_default.go @@ -0,0 +1,46 @@ +//go:build !linux +// +build !linux + +package tfexec + +import ( + "context" + "os/exec" + "strings" +) + +func (tf *Terraform) runTerraformCmd(ctx context.Context, cmd *exec.Cmd) error { + var errBuf strings.Builder + + cmd.Stdout = mergeWriters(cmd.Stdout, tf.stdout) + cmd.Stderr = mergeWriters(cmd.Stderr, tf.stderr, &errBuf) + + go func() { + <-ctx.Done() + if ctx.Err() == context.DeadlineExceeded || ctx.Err() == context.Canceled { + if cmd != nil && cmd.Process != nil && cmd.ProcessState != nil { + err := cmd.Process.Kill() + if err != nil { + tf.logger.Printf("error from kill: %s", err) + } + } + } + }() + + // check for early cancellation + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + err := cmd.Run() + if err == nil && ctx.Err() != nil { + err = ctx.Err() + } + if err != nil { + return tf.wrapExitError(ctx, err, errBuf.String()) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd_linux.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd_linux.go new file mode 100644 index 000000000..7cbdcb96f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd_linux.go @@ -0,0 +1,54 @@ +package tfexec + +import ( + "context" + "os/exec" + "strings" + "syscall" +) + +func (tf *Terraform) runTerraformCmd(ctx context.Context, cmd *exec.Cmd) error { + var errBuf strings.Builder + + cmd.Stdout = mergeWriters(cmd.Stdout, tf.stdout) + cmd.Stderr = mergeWriters(cmd.Stderr, tf.stderr, &errBuf) + + cmd.SysProcAttr = &syscall.SysProcAttr{ + // kill children if parent is dead + Pdeathsig: syscall.SIGKILL, + // set process group ID + Setpgid: true, + } + + go func() { + <-ctx.Done() + if ctx.Err() == 
context.DeadlineExceeded || ctx.Err() == context.Canceled { + if cmd != nil && cmd.Process != nil && cmd.ProcessState != nil { + // send SIGINT to process group + err := syscall.Kill(-cmd.Process.Pid, syscall.SIGINT) + if err != nil { + tf.logger.Printf("error from SIGINT: %s", err) + } + } + + // TODO: send a kill if it doesn't respond for a bit? + } + }() + + // check for early cancellation + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + err := cmd.Run() + if err == nil && ctx.Err() != nil { + err = ctx.Err() + } + if err != nil { + return tf.wrapExitError(ctx, err, errBuf.String()) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/destroy.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/destroy.go new file mode 100644 index 000000000..8011c0ba8 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/destroy.go @@ -0,0 +1,156 @@ +package tfexec + +import ( + "context" + "fmt" + "os/exec" + "strconv" +) + +type destroyConfig struct { + backup string + dir string + lock bool + + // LockTimeout must be a string with time unit, e.g. '10s' + lockTimeout string + parallelism int + reattachInfo ReattachInfo + refresh bool + state string + stateOut string + targets []string + + // Vars: each var must be supplied as a single string, e.g. 'foo=bar' + vars []string + varFiles []string +} + +var defaultDestroyOptions = destroyConfig{ + lock: true, + lockTimeout: "0s", + parallelism: 10, + refresh: true, +} + +// DestroyOption represents options used in the Destroy method. 
+type DestroyOption interface { + configureDestroy(*destroyConfig) +} + +func (opt *DirOption) configureDestroy(conf *destroyConfig) { + conf.dir = opt.path +} + +func (opt *ParallelismOption) configureDestroy(conf *destroyConfig) { + conf.parallelism = opt.parallelism +} + +func (opt *BackupOption) configureDestroy(conf *destroyConfig) { + conf.backup = opt.path +} + +func (opt *TargetOption) configureDestroy(conf *destroyConfig) { + conf.targets = append(conf.targets, opt.target) +} + +func (opt *LockTimeoutOption) configureDestroy(conf *destroyConfig) { + conf.lockTimeout = opt.timeout +} + +func (opt *StateOption) configureDestroy(conf *destroyConfig) { + conf.state = opt.path +} + +func (opt *StateOutOption) configureDestroy(conf *destroyConfig) { + conf.stateOut = opt.path +} + +func (opt *VarFileOption) configureDestroy(conf *destroyConfig) { + conf.varFiles = append(conf.varFiles, opt.path) +} + +func (opt *LockOption) configureDestroy(conf *destroyConfig) { + conf.lock = opt.lock +} + +func (opt *RefreshOption) configureDestroy(conf *destroyConfig) { + conf.refresh = opt.refresh +} + +func (opt *VarOption) configureDestroy(conf *destroyConfig) { + conf.vars = append(conf.vars, opt.assignment) +} + +func (opt *ReattachOption) configureDestroy(conf *destroyConfig) { + conf.reattachInfo = opt.info +} + +// Destroy represents the terraform destroy subcommand. +func (tf *Terraform) Destroy(ctx context.Context, opts ...DestroyOption) error { + cmd, err := tf.destroyCmd(ctx, opts...) 
+ if err != nil { + return err + } + return tf.runTerraformCmd(ctx, cmd) +} + +func (tf *Terraform) destroyCmd(ctx context.Context, opts ...DestroyOption) (*exec.Cmd, error) { + c := defaultDestroyOptions + + for _, o := range opts { + o.configureDestroy(&c) + } + + args := []string{"destroy", "-no-color", "-auto-approve", "-input=false"} + + // string opts: only pass if set + if c.backup != "" { + args = append(args, "-backup="+c.backup) + } + if c.lockTimeout != "" { + args = append(args, "-lock-timeout="+c.lockTimeout) + } + if c.state != "" { + args = append(args, "-state="+c.state) + } + if c.stateOut != "" { + args = append(args, "-state-out="+c.stateOut) + } + for _, vf := range c.varFiles { + args = append(args, "-var-file="+vf) + } + + // boolean and numerical opts: always pass + args = append(args, "-lock="+strconv.FormatBool(c.lock)) + args = append(args, "-parallelism="+fmt.Sprint(c.parallelism)) + args = append(args, "-refresh="+strconv.FormatBool(c.refresh)) + + // string slice opts: split into separate args + if c.targets != nil { + for _, ta := range c.targets { + args = append(args, "-target="+ta) + } + } + if c.vars != nil { + for _, v := range c.vars { + args = append(args, "-var", v) + } + } + + // optional positional argument + if c.dir != "" { + args = append(args, c.dir) + } + + mergeEnv := map[string]string{} + if c.reattachInfo != nil { + reattachStr, err := c.reattachInfo.marshalString() + if err != nil { + return nil, err + } + mergeEnv[reattachEnvVar] = reattachStr + } + + return tf.buildTerraformCmd(ctx, mergeEnv, args...), nil +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/doc.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/doc.go new file mode 100644 index 000000000..0e82bbd9f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/doc.go @@ -0,0 +1,4 @@ +// Package tfexec exposes functionality for constructing and running Terraform +// CLI commands. 
Structured return values use the data types defined in the +// github.com/hashicorp/terraform-json package. +package tfexec diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/errors.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/errors.go new file mode 100644 index 000000000..7a32ef2f1 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/errors.go @@ -0,0 +1,39 @@ +package tfexec + +import "fmt" + +// this file contains non-parsed exported errors + +type ErrNoSuitableBinary struct { + err error +} + +func (e *ErrNoSuitableBinary) Error() string { + return fmt.Sprintf("no suitable terraform binary could be found: %s", e.err.Error()) +} + +func (e *ErrNoSuitableBinary) Unwrap() error { + return e.err +} + +// ErrVersionMismatch is returned when the detected Terraform version is not compatible with the +// command or flags being used in this invocation. +type ErrVersionMismatch struct { + MinInclusive string + MaxExclusive string + Actual string +} + +func (e *ErrVersionMismatch) Error() string { + return fmt.Sprintf("unexpected version %s (min: %s, max: %s)", e.Actual, e.MinInclusive, e.MaxExclusive) +} + +// ErrManualEnvVar is returned when an env var that should be set programmatically via an option or method +// is set via the manual environment passing functions.
+type ErrManualEnvVar struct { + Name string +} + +func (err *ErrManualEnvVar) Error() string { + return fmt.Sprintf("manual setting of env var %q detected", err.Name) +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/exit_errors.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/exit_errors.go new file mode 100644 index 000000000..ea25b2a56 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/exit_errors.go @@ -0,0 +1,331 @@ +package tfexec + +import ( + "context" + "fmt" + "os/exec" + "regexp" + "strings" + "text/template" +) + +// this file contains errors parsed from stderr + +var ( + // The "Required variable not set:" case is for 0.11 + missingVarErrRegexp = regexp.MustCompile(`Error: No value for required variable|Error: Required variable not set:`) + missingVarNameRegexp = regexp.MustCompile(`The root module input variable\s"(.+)"\sis\snot\sset,\sand\shas\sno\sdefault|Error: Required variable not set: (.+)`) + + usageRegexp = regexp.MustCompile(`Too many command line arguments|^Usage: .*Options:.*|Error: Invalid -\d+ option`) + + noInitErrRegexp = regexp.MustCompile( + // UNINITIALISED PROVIDERS/MODULES + `Error: Could not satisfy plugin requirements|` + + `Error: Could not load plugin|` + // v0.13 + `Please run \"terraform init\"|` + // v1.1.0 early alpha versions (ref 89b05050) + `run:\s+terraform init|` + // v1.1.0 (ref df578afd) + `Run\s+\"terraform init\"|` + // v1.2.0 + + // UNINITIALISED BACKENDS + `Error: Initialization required.|` + // v0.13 + `Error: Backend initialization required, please run \"terraform init\"`, // v0.15 + ) + + noConfigErrRegexp = regexp.MustCompile(`Error: No configuration files`) + + workspaceDoesNotExistRegexp = regexp.MustCompile(`Workspace "(.+)" doesn't exist.`) + + workspaceAlreadyExistsRegexp = regexp.MustCompile(`Workspace "(.+)" already exists`) + + tfVersionMismatchErrRegexp = regexp.MustCompile(`Error: The currently running version of Terraform doesn't meet the|Error: Unsupported 
Terraform Core version`) + tfVersionMismatchConstraintRegexp = regexp.MustCompile(`required_version = "(.+)"|Required version: (.+)\b`) + configInvalidErrRegexp = regexp.MustCompile(`There are some problems with the configuration, described below.`) + + stateLockErrRegexp = regexp.MustCompile(`Error acquiring the state lock`) + stateLockInfoRegexp = regexp.MustCompile(`Lock Info:\n\s*ID:\s*([^\n]+)\n\s*Path:\s*([^\n]+)\n\s*Operation:\s*([^\n]+)\n\s*Who:\s*([^\n]+)\n\s*Version:\s*([^\n]+)\n\s*Created:\s*([^\n]+)\n`) + statePlanReadErrRegexp = regexp.MustCompile( + `Terraform couldn't read the given file as a state or plan file.|` + + `Error: Failed to read the given file as a state or plan file`) +) + +func (tf *Terraform) wrapExitError(ctx context.Context, err error, stderr string) error { + exitErr, ok := err.(*exec.ExitError) + if !ok { + // not an exit error, short circuit, nothing to wrap + return err + } + + ctxErr := ctx.Err() + + // nothing to parse, return early + errString := strings.TrimSpace(stderr) + if errString == "" { + return &unwrapper{exitErr, ctxErr} + } + + switch { + case tfVersionMismatchErrRegexp.MatchString(stderr): + constraint := "" + constraints := tfVersionMismatchConstraintRegexp.FindStringSubmatch(stderr) + for i := 1; i < len(constraints); i++ { + constraint = strings.TrimSpace(constraints[i]) + if constraint != "" { + break + } + } + + if constraint == "" { + // hardcode a value here for weird cases (incl. 
0.12) + constraint = "unknown" + } + + // only set this if it happened to be cached already + ver := "" + if tf != nil && tf.execVersion != nil { + ver = tf.execVersion.String() + } + + return &ErrTFVersionMismatch{ + unwrapper: unwrapper{exitErr, ctxErr}, + + Constraint: constraint, + TFVersion: ver, + } + case missingVarErrRegexp.MatchString(stderr): + name := "" + names := missingVarNameRegexp.FindStringSubmatch(stderr) + for i := 1; i < len(names); i++ { + name = strings.TrimSpace(names[i]) + if name != "" { + break + } + } + + return &ErrMissingVar{ + unwrapper: unwrapper{exitErr, ctxErr}, + + VariableName: name, + } + case usageRegexp.MatchString(stderr): + return &ErrCLIUsage{ + unwrapper: unwrapper{exitErr, ctxErr}, + + stderr: stderr, + } + case noInitErrRegexp.MatchString(stderr): + return &ErrNoInit{ + unwrapper: unwrapper{exitErr, ctxErr}, + + stderr: stderr, + } + case noConfigErrRegexp.MatchString(stderr): + return &ErrNoConfig{ + unwrapper: unwrapper{exitErr, ctxErr}, + + stderr: stderr, + } + case workspaceDoesNotExistRegexp.MatchString(stderr): + submatches := workspaceDoesNotExistRegexp.FindStringSubmatch(stderr) + if len(submatches) == 2 { + return &ErrNoWorkspace{ + unwrapper: unwrapper{exitErr, ctxErr}, + + Name: submatches[1], + } + } + case workspaceAlreadyExistsRegexp.MatchString(stderr): + submatches := workspaceAlreadyExistsRegexp.FindStringSubmatch(stderr) + if len(submatches) == 2 { + return &ErrWorkspaceExists{ + unwrapper: unwrapper{exitErr, ctxErr}, + + Name: submatches[1], + } + } + case configInvalidErrRegexp.MatchString(stderr): + return &ErrConfigInvalid{stderr: stderr} + case stateLockErrRegexp.MatchString(stderr): + submatches := stateLockInfoRegexp.FindStringSubmatch(stderr) + if len(submatches) == 7 { + return &ErrStateLocked{ + unwrapper: unwrapper{exitErr, ctxErr}, + + ID: submatches[1], + Path: submatches[2], + Operation: submatches[3], + Who: submatches[4], + Version: submatches[5], + Created: submatches[6], + } + } + case 
statePlanReadErrRegexp.MatchString(stderr): + return &ErrStatePlanRead{stderr: stderr} + } + + return fmt.Errorf("%w\n%s", &unwrapper{exitErr, ctxErr}, stderr) +} + +type unwrapper struct { + err error + ctxErr error +} + +func (u *unwrapper) Unwrap() error { + return u.err +} + +func (u *unwrapper) Is(target error) bool { + switch target { + case context.DeadlineExceeded, context.Canceled: + return u.ctxErr == context.DeadlineExceeded || + u.ctxErr == context.Canceled + } + return false +} + +func (u *unwrapper) Error() string { + return u.err.Error() +} + +type ErrConfigInvalid struct { + stderr string +} + +func (e *ErrConfigInvalid) Error() string { + return "configuration is invalid" +} + +type ErrMissingVar struct { + unwrapper + + VariableName string +} + +func (err *ErrMissingVar) Error() string { + return fmt.Sprintf("variable %q was required but not supplied", err.VariableName) +} + +type ErrNoWorkspace struct { + unwrapper + + Name string +} + +func (err *ErrNoWorkspace) Error() string { + return fmt.Sprintf("workspace %q does not exist", err.Name) +} + +// ErrWorkspaceExists is returned when creating a workspace that already exists +type ErrWorkspaceExists struct { + unwrapper + + Name string +} + +func (err *ErrWorkspaceExists) Error() string { + return fmt.Sprintf("workspace %q already exists", err.Name) +} + +type ErrNoInit struct { + unwrapper + + stderr string +} + +func (e *ErrNoInit) Error() string { + return e.stderr +} + +type ErrStatePlanRead struct { + unwrapper + + stderr string +} + +func (e *ErrStatePlanRead) Error() string { + return e.stderr +} + +type ErrNoConfig struct { + unwrapper + + stderr string +} + +func (e *ErrNoConfig) Error() string { + return e.stderr +} + +// ErrCLIUsage is returned when the combination of flags or arguments is incorrect. +// +// CLI indicates usage errors in three different ways: either +// 1. Exit 1, with a custom error message on stderr. +// 2. Exit 1, with command usage logged to stderr. +// 3. 
Exit 127, with command usage logged to stdout. +// Currently cases 1 and 2 are handled. +// TODO KEM: Handle exit 127 case. How does this work on non-Unix platforms? +type ErrCLIUsage struct { + unwrapper + + stderr string +} + +func (e *ErrCLIUsage) Error() string { + return e.stderr +} + +// ErrTFVersionMismatch is returned when the running Terraform version is not compatible with the +// value specified for required_version in the terraform block. +type ErrTFVersionMismatch struct { + unwrapper + + TFVersion string + + // Constraint is not returned in the error messaging on 0.12 + Constraint string +} + +func (e *ErrTFVersionMismatch) Error() string { + version := "version" + if e.TFVersion != "" { + version = e.TFVersion + } + + requirement := "" + if e.Constraint != "" { + requirement = fmt.Sprintf(" (%s required)", e.Constraint) + } + + return fmt.Sprintf("terraform %s not supported by configuration%s", + version, requirement) +} + +// ErrStateLocked is returned when the state lock is already held by another process. 
+type ErrStateLocked struct { + unwrapper + + ID string + Path string + Operation string + Who string + Version string + Created string +} + +func (e *ErrStateLocked) Error() string { + tmpl := `Lock Info: + ID: {{.ID}} + Path: {{.Path}} + Operation: {{.Operation}} + Who: {{.Who}} + Version: {{.Version}} + Created: {{.Created}} +` + + t := template.Must(template.New("LockInfo").Parse(tmpl)) + var out strings.Builder + if err := t.Execute(&out, e); err != nil { + return "error acquiring the state lock" + } + return fmt.Sprintf("error acquiring the state lock: %v", out.String()) +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/fmt.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/fmt.go new file mode 100644 index 000000000..2234c79fa --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/fmt.go @@ -0,0 +1,159 @@ +package tfexec + +import ( + "context" + "fmt" + "io" + "os/exec" + "path/filepath" + "strings" +) + +type formatConfig struct { + recursive bool + dir string +} + +var defaultFormatConfig = formatConfig{ + recursive: false, +} + +type FormatOption interface { + configureFormat(*formatConfig) +} + +func (opt *RecursiveOption) configureFormat(conf *formatConfig) { + conf.recursive = opt.recursive +} + +func (opt *DirOption) configureFormat(conf *formatConfig) { + conf.dir = opt.path +} + +// FormatString formats a passed string, given a path to Terraform. +func FormatString(ctx context.Context, execPath string, content string) (string, error) { + tf, err := NewTerraform(filepath.Dir(execPath), execPath) + if err != nil { + return "", err + } + + return tf.FormatString(ctx, content) +} + +// FormatString formats a passed string. 
+func (tf *Terraform) FormatString(ctx context.Context, content string) (string, error) { + in := strings.NewReader(content) + var outBuf strings.Builder + err := tf.Format(ctx, in, &outBuf) + if err != nil { + return "", err + } + return outBuf.String(), nil +} + +// Format performs formatting on the unformatted io.Reader (as stdin to the CLI) and returns +// the formatted result on the formatted io.Writer. +func (tf *Terraform) Format(ctx context.Context, unformatted io.Reader, formatted io.Writer) error { + cmd, err := tf.formatCmd(ctx, nil, Dir("-")) + if err != nil { + return err + } + + cmd.Stdin = unformatted + cmd.Stdout = mergeWriters(cmd.Stdout, formatted) + + return tf.runTerraformCmd(ctx, cmd) +} + +// FormatWrite attempts to format and modify all config files in the working or selected (via DirOption) directory. +func (tf *Terraform) FormatWrite(ctx context.Context, opts ...FormatOption) error { + for _, o := range opts { + switch o := o.(type) { + case *DirOption: + if o.path == "-" { + return fmt.Errorf("a path of \"-\" is not supported for this method, please use FormatString") + } + } + } + + cmd, err := tf.formatCmd(ctx, []string{"-write=true", "-list=false", "-diff=false"}, opts...) + if err != nil { + return err + } + + return tf.runTerraformCmd(ctx, cmd) +} + +// FormatCheck returns true if the config files in the working or selected (via DirOption) directory are already formatted. +func (tf *Terraform) FormatCheck(ctx context.Context, opts ...FormatOption) (bool, []string, error) { + for _, o := range opts { + switch o := o.(type) { + case *DirOption: + if o.path == "-" { + return false, nil, fmt.Errorf("a path of \"-\" is not supported for this method, please use FormatString") + } + } + } + + cmd, err := tf.formatCmd(ctx, []string{"-write=false", "-list=true", "-diff=false", "-check=true"}, opts...) 
+ if err != nil { + return false, nil, err + } + + var outBuf strings.Builder + cmd.Stdout = mergeWriters(cmd.Stdout, &outBuf) + + err = tf.runTerraformCmd(ctx, cmd) + if err == nil { + return true, nil, nil + } + if cmd.ProcessState.ExitCode() == 3 { + // unformatted, parse the file list + + files := []string{} + lines := strings.Split(strings.Replace(outBuf.String(), "\r\n", "\n", -1), "\n") + for _, l := range lines { + l = strings.TrimSpace(l) + if l == "" { + continue + } + files = append(files, l) + } + + return false, files, nil + } + return false, nil, err +} + +func (tf *Terraform) formatCmd(ctx context.Context, args []string, opts ...FormatOption) (*exec.Cmd, error) { + err := tf.compatible(ctx, tf0_7_7, nil) + if err != nil { + return nil, fmt.Errorf("fmt was first introduced in Terraform 0.7.7: %w", err) + } + + c := defaultFormatConfig + + for _, o := range opts { + switch o.(type) { + case *RecursiveOption: + err := tf.compatible(ctx, tf0_12_0, nil) + if err != nil { + return nil, fmt.Errorf("-recursive was added to fmt in Terraform 0.12: %w", err) + } + } + + o.configureFormat(&c) + } + + args = append([]string{"fmt", "-no-color"}, args...) 
+ + if c.recursive { + args = append(args, "-recursive") + } + + if c.dir != "" { + args = append(args, c.dir) + } + + return tf.buildTerraformCmd(ctx, nil, args...), nil +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/force_unlock.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/force_unlock.go new file mode 100644 index 000000000..c8dddffa1 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/force_unlock.go @@ -0,0 +1,50 @@ +package tfexec + +import ( + "context" + "os/exec" +) + +type forceUnlockConfig struct { + dir string +} + +var defaultForceUnlockOptions = forceUnlockConfig{} + +type ForceUnlockOption interface { + configureForceUnlock(*forceUnlockConfig) +} + +func (opt *DirOption) configureForceUnlock(conf *forceUnlockConfig) { + conf.dir = opt.path +} + +// ForceUnlock represents the `terraform force-unlock` command +func (tf *Terraform) ForceUnlock(ctx context.Context, lockID string, opts ...ForceUnlockOption) error { + unlockCmd := tf.forceUnlockCmd(ctx, lockID, opts...) + + if err := tf.runTerraformCmd(ctx, unlockCmd); err != nil { + return err + } + + return nil +} + +func (tf *Terraform) forceUnlockCmd(ctx context.Context, lockID string, opts ...ForceUnlockOption) *exec.Cmd { + c := defaultForceUnlockOptions + + for _, o := range opts { + o.configureForceUnlock(&c) + } + args := []string{"force-unlock", "-force"} + + // positional arguments + args = append(args, lockID) + + // optional positional arguments + if c.dir != "" { + args = append(args, c.dir) + } + + return tf.buildTerraformCmd(ctx, nil, args...) 
+} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/get.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/get.go new file mode 100644 index 000000000..5bac9b197 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/get.go @@ -0,0 +1,52 @@ +package tfexec + +import ( + "context" + "fmt" + "os/exec" +) + +type getCmdConfig struct { + dir string + update bool +} + +// GetCmdOption represents options used in the Get method. +type GetCmdOption interface { + configureGet(*getCmdConfig) +} + +func (opt *DirOption) configureGet(conf *getCmdConfig) { + conf.dir = opt.path +} + +func (opt *UpdateOption) configureGet(conf *getCmdConfig) { + conf.update = opt.update +} + +// Get represents the terraform get subcommand. +func (tf *Terraform) Get(ctx context.Context, opts ...GetCmdOption) error { + cmd, err := tf.getCmd(ctx, opts...) + if err != nil { + return err + } + return tf.runTerraformCmd(ctx, cmd) +} + +func (tf *Terraform) getCmd(ctx context.Context, opts ...GetCmdOption) (*exec.Cmd, error) { + c := getCmdConfig{} + + for _, o := range opts { + o.configureGet(&c) + } + + args := []string{"get", "-no-color"} + + args = append(args, "-update="+fmt.Sprint(c.update)) + + if c.dir != "" { + args = append(args, c.dir) + } + + return tf.buildTerraformCmd(ctx, nil, args...), nil +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/graph.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/graph.go new file mode 100644 index 000000000..73396280b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/graph.go @@ -0,0 +1,85 @@ +package tfexec + +import ( + "context" + "fmt" + "os/exec" + "strings" +) + +type graphConfig struct { + plan string + drawCycles bool + graphType string +} + +var defaultGraphOptions = graphConfig{} + +type GraphOption interface { + configureGraph(*graphConfig) +} + +func (opt *GraphPlanOption) configureGraph(conf *graphConfig) { + conf.plan = opt.file +} + +func (opt *DrawCyclesOption) 
configureGraph(conf *graphConfig) { + conf.drawCycles = opt.drawCycles +} + +func (opt *GraphTypeOption) configureGraph(conf *graphConfig) { + conf.graphType = opt.graphType +} + +func (tf *Terraform) Graph(ctx context.Context, opts ...GraphOption) (string, error) { + graphCmd, err := tf.graphCmd(ctx, opts...) + if err != nil { + return "", err + } + var outBuf strings.Builder + graphCmd.Stdout = &outBuf + err = tf.runTerraformCmd(ctx, graphCmd) + if err != nil { + return "", err + } + + return outBuf.String(), nil + +} + +func (tf *Terraform) graphCmd(ctx context.Context, opts ...GraphOption) (*exec.Cmd, error) { + c := defaultGraphOptions + + for _, o := range opts { + o.configureGraph(&c) + } + + args := []string{"graph"} + + if c.plan != "" { + // plan was a positional argument prior to Terraform 0.15.0. Ensure proper use by checking version. + if err := tf.compatible(ctx, tf0_15_0, nil); err == nil { + args = append(args, "-plan="+c.plan) + } else { + args = append(args, c.plan) + } + } + + if c.drawCycles { + err := tf.compatible(ctx, tf0_5_0, nil) + if err != nil { + return nil, fmt.Errorf("-draw-cycles was first introduced in Terraform 0.5.0: %w", err) + } + args = append(args, "-draw-cycles") + } + + if c.graphType != "" { + err := tf.compatible(ctx, tf0_8_0, nil) + if err != nil { + return nil, fmt.Errorf("-graph-type was first introduced in Terraform 0.8.0: %w", err) + } + args = append(args, "-type="+c.graphType) + } + + return tf.buildTerraformCmd(ctx, nil, args...), nil +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/import.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/import.go new file mode 100644 index 000000000..e243d7281 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/import.go @@ -0,0 +1,141 @@ +package tfexec + +import ( + "context" + "os/exec" + "strconv" +) + +type importConfig struct { + addr string + id string + backup string + config string + allowMissingConfig bool + lock bool + 
lockTimeout string + reattachInfo ReattachInfo + state string + stateOut string + vars []string + varFiles []string +} + +var defaultImportOptions = importConfig{ + allowMissingConfig: false, + lock: true, + lockTimeout: "0s", +} + +// ImportOption represents options used in the Import method. +type ImportOption interface { + configureImport(*importConfig) +} + +func (opt *BackupOption) configureImport(conf *importConfig) { + conf.backup = opt.path +} + +func (opt *ConfigOption) configureImport(conf *importConfig) { + conf.config = opt.path +} + +func (opt *AllowMissingConfigOption) configureImport(conf *importConfig) { + conf.allowMissingConfig = opt.allowMissingConfig +} + +func (opt *LockOption) configureImport(conf *importConfig) { + conf.lock = opt.lock +} + +func (opt *LockTimeoutOption) configureImport(conf *importConfig) { + conf.lockTimeout = opt.timeout +} + +func (opt *ReattachOption) configureImport(conf *importConfig) { + conf.reattachInfo = opt.info +} + +func (opt *StateOption) configureImport(conf *importConfig) { + conf.state = opt.path +} + +func (opt *StateOutOption) configureImport(conf *importConfig) { + conf.stateOut = opt.path +} + +func (opt *VarOption) configureImport(conf *importConfig) { + conf.vars = append(conf.vars, opt.assignment) +} + +func (opt *VarFileOption) configureImport(conf *importConfig) { + conf.varFiles = append(conf.varFiles, opt.path) +} + +// Import represents the terraform import subcommand. +func (tf *Terraform) Import(ctx context.Context, address, id string, opts ...ImportOption) error { + cmd, err := tf.importCmd(ctx, address, id, opts...) 
+ if err != nil { + return err + } + return tf.runTerraformCmd(ctx, cmd) +} + +func (tf *Terraform) importCmd(ctx context.Context, address, id string, opts ...ImportOption) (*exec.Cmd, error) { + c := defaultImportOptions + + for _, o := range opts { + o.configureImport(&c) + } + + args := []string{"import", "-no-color", "-input=false"} + + // string opts: only pass if set + if c.backup != "" { + args = append(args, "-backup="+c.backup) + } + if c.config != "" { + args = append(args, "-config="+c.config) + } + if c.lockTimeout != "" { + args = append(args, "-lock-timeout="+c.lockTimeout) + } + if c.state != "" { + args = append(args, "-state="+c.state) + } + if c.stateOut != "" { + args = append(args, "-state-out="+c.stateOut) + } + for _, vf := range c.varFiles { + args = append(args, "-var-file="+vf) + } + + // boolean and numerical opts: always pass + args = append(args, "-lock="+strconv.FormatBool(c.lock)) + + // unary flags: pass if true + if c.allowMissingConfig { + args = append(args, "-allow-missing-config") + } + + // string slice opts: split into separate args + if c.vars != nil { + for _, v := range c.vars { + args = append(args, "-var", v) + } + } + + // required args, always pass + args = append(args, address, id) + + mergeEnv := map[string]string{} + if c.reattachInfo != nil { + reattachStr, err := c.reattachInfo.marshalString() + if err != nil { + return nil, err + } + mergeEnv[reattachEnvVar] = reattachStr + } + + return tf.buildTerraformCmd(ctx, mergeEnv, args...), nil +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/init.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/init.go new file mode 100644 index 000000000..bff9ecd3e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/init.go @@ -0,0 +1,179 @@ +package tfexec + +import ( + "context" + "fmt" + "os/exec" +) + +type initConfig struct { + backend bool + backendConfig []string + dir string + forceCopy bool + fromModule string + get bool + getPlugins bool 
+ lock bool + lockTimeout string + pluginDir []string + reattachInfo ReattachInfo + reconfigure bool + upgrade bool + verifyPlugins bool +} + +var defaultInitOptions = initConfig{ + backend: true, + forceCopy: false, + get: true, + getPlugins: true, + lock: true, + lockTimeout: "0s", + reconfigure: false, + upgrade: false, + verifyPlugins: true, +} + +// InitOption represents options used in the Init method. +type InitOption interface { + configureInit(*initConfig) +} + +func (opt *BackendOption) configureInit(conf *initConfig) { + conf.backend = opt.backend +} + +func (opt *BackendConfigOption) configureInit(conf *initConfig) { + conf.backendConfig = append(conf.backendConfig, opt.path) +} + +func (opt *DirOption) configureInit(conf *initConfig) { + conf.dir = opt.path +} + +func (opt *FromModuleOption) configureInit(conf *initConfig) { + conf.fromModule = opt.source +} + +func (opt *GetOption) configureInit(conf *initConfig) { + conf.get = opt.get +} + +func (opt *GetPluginsOption) configureInit(conf *initConfig) { + conf.getPlugins = opt.getPlugins +} + +func (opt *LockOption) configureInit(conf *initConfig) { + conf.lock = opt.lock +} + +func (opt *LockTimeoutOption) configureInit(conf *initConfig) { + conf.lockTimeout = opt.timeout +} + +func (opt *PluginDirOption) configureInit(conf *initConfig) { + conf.pluginDir = append(conf.pluginDir, opt.pluginDir) +} + +func (opt *ReattachOption) configureInit(conf *initConfig) { + conf.reattachInfo = opt.info +} + +func (opt *ReconfigureOption) configureInit(conf *initConfig) { + conf.reconfigure = opt.reconfigure +} + +func (opt *UpgradeOption) configureInit(conf *initConfig) { + conf.upgrade = opt.upgrade +} + +func (opt *VerifyPluginsOption) configureInit(conf *initConfig) { + conf.verifyPlugins = opt.verifyPlugins +} + +// Init represents the terraform init subcommand. +func (tf *Terraform) Init(ctx context.Context, opts ...InitOption) error { + cmd, err := tf.initCmd(ctx, opts...) 
+ if err != nil { + return err + } + return tf.runTerraformCmd(ctx, cmd) +} + +func (tf *Terraform) initCmd(ctx context.Context, opts ...InitOption) (*exec.Cmd, error) { + c := defaultInitOptions + + for _, o := range opts { + switch o.(type) { + case *LockOption, *LockTimeoutOption, *VerifyPluginsOption, *GetPluginsOption: + err := tf.compatible(ctx, nil, tf0_15_0) + if err != nil { + return nil, fmt.Errorf("-lock, -lock-timeout, -verify-plugins, and -get-plugins options are no longer available as of Terraform 0.15: %w", err) + } + } + + o.configureInit(&c) + } + + args := []string{"init", "-no-color", "-force-copy", "-input=false"} + + // string opts: only pass if set + if c.fromModule != "" { + args = append(args, "-from-module="+c.fromModule) + } + + // string opts removed in 0.15: pass if set and <0.15 + err := tf.compatible(ctx, nil, tf0_15_0) + if err == nil { + if c.lockTimeout != "" { + args = append(args, "-lock-timeout="+c.lockTimeout) + } + } + + // boolean opts: always pass + args = append(args, "-backend="+fmt.Sprint(c.backend)) + args = append(args, "-get="+fmt.Sprint(c.get)) + args = append(args, "-upgrade="+fmt.Sprint(c.upgrade)) + + // boolean opts removed in 0.15: pass if <0.15 + err = tf.compatible(ctx, nil, tf0_15_0) + if err == nil { + args = append(args, "-lock="+fmt.Sprint(c.lock)) + args = append(args, "-get-plugins="+fmt.Sprint(c.getPlugins)) + args = append(args, "-verify-plugins="+fmt.Sprint(c.verifyPlugins)) + } + + // unary flags: pass if true + if c.reconfigure { + args = append(args, "-reconfigure") + } + + // string slice opts: split into separate args + if c.backendConfig != nil { + for _, bc := range c.backendConfig { + args = append(args, "-backend-config="+bc) + } + } + if c.pluginDir != nil { + for _, pd := range c.pluginDir { + args = append(args, "-plugin-dir="+pd) + } + } + + // optional positional argument + if c.dir != "" { + args = append(args, c.dir) + } + + mergeEnv := map[string]string{} + if c.reattachInfo != nil { + 
reattachStr, err := c.reattachInfo.marshalString() + if err != nil { + return nil, err + } + mergeEnv[reattachEnvVar] = reattachStr + } + + return tf.buildTerraformCmd(ctx, mergeEnv, args...), nil +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/options.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/options.go new file mode 100644 index 000000000..ad3cc65c6 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/options.go @@ -0,0 +1,411 @@ +package tfexec + +import ( + "encoding/json" +) + +// AllowMissingConfigOption represents the -allow-missing-config flag. +type AllowMissingConfigOption struct { + allowMissingConfig bool +} + +// AllowMissingConfig represents the -allow-missing-config flag. +func AllowMissingConfig(allowMissingConfig bool) *AllowMissingConfigOption { + return &AllowMissingConfigOption{allowMissingConfig} +} + +// AllowMissingOption represents the -allow-missing flag. +type AllowMissingOption struct { + allowMissing bool +} + +// AllowMissing represents the -allow-missing flag. +func AllowMissing(allowMissing bool) *AllowMissingOption { + return &AllowMissingOption{allowMissing} +} + +// BackendOption represents the -backend flag. +type BackendOption struct { + backend bool +} + +// Backend represents the -backend flag. +func Backend(backend bool) *BackendOption { + return &BackendOption{backend} +} + +// BackendConfigOption represents the -backend-config flag. +type BackendConfigOption struct { + path string +} + +// BackendConfig represents the -backend-config flag. +func BackendConfig(backendConfig string) *BackendConfigOption { + return &BackendConfigOption{backendConfig} +} + +type BackupOutOption struct { + path string +} + +// BackupOutOption represents the -backup-out flag. +func BackupOut(path string) *BackupOutOption { + return &BackupOutOption{path} +} + +// BackupOption represents the -backup flag. +type BackupOption struct { + path string +} + +// Backup represents the -backup flag. 
+func Backup(path string) *BackupOption { + return &BackupOption{path} +} + +// DisableBackup is a convenience method for Backup("-"), indicating backup state should be disabled. +func DisableBackup() *BackupOption { + return &BackupOption{"-"} +} + +// ConfigOption represents the -config flag. +type ConfigOption struct { + path string +} + +// Config represents the -config flag. +func Config(path string) *ConfigOption { + return &ConfigOption{path} +} + +// CopyStateOption represents the -state flag for terraform workspace new. This flag is used +// to copy an existing state file in to the new workspace. +type CopyStateOption struct { + path string +} + +// CopyState represents the -state flag for terraform workspace new. This flag is used +// to copy an existing state file in to the new workspace. +func CopyState(path string) *CopyStateOption { + return &CopyStateOption{path} +} + +type DirOption struct { + path string +} + +func Dir(path string) *DirOption { + return &DirOption{path} +} + +type DirOrPlanOption struct { + path string +} + +func DirOrPlan(path string) *DirOrPlanOption { + return &DirOrPlanOption{path} +} + +// DestroyFlagOption represents the -destroy flag. +type DestroyFlagOption struct { + // named to prevent conflict with DestroyOption interface + + destroy bool +} + +// Destroy represents the -destroy flag. +func Destroy(destroy bool) *DestroyFlagOption { + return &DestroyFlagOption{destroy} +} + +type DrawCyclesOption struct { + drawCycles bool +} + +// DrawCycles represents the -draw-cycles flag. +func DrawCycles(drawCycles bool) *DrawCyclesOption { + return &DrawCyclesOption{drawCycles} +} + +type DryRunOption struct { + dryRun bool +} + +// DryRun represents the -dry-run flag. 
+func DryRun(dryRun bool) *DryRunOption { + return &DryRunOption{dryRun} +} + +type FSMirrorOption struct { + fsMirror string +} + +// FSMirror represents the -fs-mirror option (path to filesystem mirror directory) +func FSMirror(fsMirror string) *FSMirrorOption { + return &FSMirrorOption{fsMirror} +} + +type ForceOption struct { + force bool +} + +func Force(force bool) *ForceOption { + return &ForceOption{force} +} + +type ForceCopyOption struct { + forceCopy bool +} + +func ForceCopy(forceCopy bool) *ForceCopyOption { + return &ForceCopyOption{forceCopy} +} + +type FromModuleOption struct { + source string +} + +func FromModule(source string) *FromModuleOption { + return &FromModuleOption{source} +} + +type GetOption struct { + get bool +} + +func Get(get bool) *GetOption { + return &GetOption{get} +} + +type GetPluginsOption struct { + getPlugins bool +} + +func GetPlugins(getPlugins bool) *GetPluginsOption { + return &GetPluginsOption{getPlugins} +} + +// LockOption represents the -lock flag. +type LockOption struct { + lock bool +} + +// Lock represents the -lock flag. +func Lock(lock bool) *LockOption { + return &LockOption{lock} +} + +// LockTimeoutOption represents the -lock-timeout flag. +type LockTimeoutOption struct { + timeout string +} + +// LockTimeout represents the -lock-timeout flag. +func LockTimeout(lockTimeout string) *LockTimeoutOption { + // TODO: should this just use a duration instead? 
+ return &LockTimeoutOption{lockTimeout} +} + +type NetMirrorOption struct { + netMirror string +} + +// NetMirror represents the -net-mirror option (base URL of a network mirror) +func NetMirror(netMirror string) *NetMirrorOption { + return &NetMirrorOption{netMirror} +} + +type OutOption struct { + path string +} + +func Out(path string) *OutOption { + return &OutOption{path} +} + +type ParallelismOption struct { + parallelism int +} + +func Parallelism(n int) *ParallelismOption { + return &ParallelismOption{n} +} + +type GraphPlanOption struct { + file string +} + +// GraphPlan represents the -plan flag which is a specified plan file string +func GraphPlan(file string) *GraphPlanOption { + return &GraphPlanOption{file} +} + +type PlatformOption struct { + platform string +} + +// Platform represents the -platform flag which is an os_arch string +func Platform(platform string) *PlatformOption { + return &PlatformOption{platform} +} + +type PluginDirOption struct { + pluginDir string +} + +func PluginDir(pluginDir string) *PluginDirOption { + return &PluginDirOption{pluginDir} +} + +type ProviderOption struct { + provider string +} + +// Provider represents the positional argument (provider source address) +func Provider(providers string) *ProviderOption { + return &ProviderOption{providers} +} + +type ReattachInfo map[string]ReattachConfig + +// ReattachConfig holds the information Terraform needs to be able to attach +// itself to a provider process, so it can drive the process. +type ReattachConfig struct { + Protocol string + ProtocolVersion int + Pid int + Test bool + Addr ReattachConfigAddr +} + +// ReattachConfigAddr is a JSON-encoding friendly version of net.Addr. 
+type ReattachConfigAddr struct { + Network string + String string +} + +type ReattachOption struct { + info ReattachInfo +} + +func (info ReattachInfo) marshalString() (string, error) { + reattachStr, err := json.Marshal(info) + if err != nil { + return "", err + } + return string(reattachStr), nil +} + +func Reattach(info ReattachInfo) *ReattachOption { + return &ReattachOption{info} +} + +type ReconfigureOption struct { + reconfigure bool +} + +func Reconfigure(reconfigure bool) *ReconfigureOption { + return &ReconfigureOption{reconfigure} +} + +type RecursiveOption struct { + recursive bool +} + +func Recursive(r bool) *RecursiveOption { + return &RecursiveOption{r} +} + +type RefreshOption struct { + refresh bool +} + +func Refresh(refresh bool) *RefreshOption { + return &RefreshOption{refresh} +} + +type ReplaceOption struct { + address string +} + +func Replace(address string) *ReplaceOption { + return &ReplaceOption{address} +} + +type StateOption struct { + path string +} + +// State represents the -state flag. +// +// Deprecated: The -state CLI flag is a legacy flag and should not be used. +// If you need a different state file for every run, you can instead use the +// local backend. 
+// See https://github.com/hashicorp/terraform/issues/25920#issuecomment-676560799 +func State(path string) *StateOption { + return &StateOption{path} +} + +type StateOutOption struct { + path string +} + +func StateOut(path string) *StateOutOption { + return &StateOutOption{path} +} + +type TargetOption struct { + target string +} + +func Target(resource string) *TargetOption { + return &TargetOption{resource} +} + +type GraphTypeOption struct { + graphType string +} + +func GraphType(graphType string) *GraphTypeOption { + return &GraphTypeOption{graphType} +} + +type UpdateOption struct { + update bool +} + +func Update(update bool) *UpdateOption { + return &UpdateOption{update} +} + +type UpgradeOption struct { + upgrade bool +} + +func Upgrade(upgrade bool) *UpgradeOption { + return &UpgradeOption{upgrade} +} + +type VarOption struct { + assignment string +} + +func Var(assignment string) *VarOption { + return &VarOption{assignment} +} + +type VarFileOption struct { + path string +} + +func VarFile(path string) *VarFileOption { + return &VarFileOption{path} +} + +type VerifyPluginsOption struct { + verifyPlugins bool +} + +func VerifyPlugins(verifyPlugins bool) *VerifyPluginsOption { + return &VerifyPluginsOption{verifyPlugins} +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/output.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/output.go new file mode 100644 index 000000000..b16b8b728 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/output.go @@ -0,0 +1,63 @@ +package tfexec + +import ( + "context" + "encoding/json" + "os/exec" +) + +type outputConfig struct { + state string + json bool +} + +var defaultOutputOptions = outputConfig{} + +// OutputOption represents options used in the Output method. 
+type OutputOption interface { + configureOutput(*outputConfig) +} + +func (opt *StateOption) configureOutput(conf *outputConfig) { + conf.state = opt.path +} + +// OutputMeta represents the JSON output of 'terraform output -json', +// which resembles state format version 3 due to a historical accident. +// Please see hashicorp/terraform/command/output.go. +// TODO KEM: Should this type be in terraform-json? +type OutputMeta struct { + Sensitive bool `json:"sensitive"` + Type json.RawMessage `json:"type"` + Value json.RawMessage `json:"value"` +} + +// Output represents the terraform output subcommand. +func (tf *Terraform) Output(ctx context.Context, opts ...OutputOption) (map[string]OutputMeta, error) { + outputCmd := tf.outputCmd(ctx, opts...) + + outputs := map[string]OutputMeta{} + err := tf.runTerraformCmdJSON(ctx, outputCmd, &outputs) + if err != nil { + return nil, err + } + + return outputs, nil +} + +func (tf *Terraform) outputCmd(ctx context.Context, opts ...OutputOption) *exec.Cmd { + c := defaultOutputOptions + + for _, o := range opts { + o.configureOutput(&c) + } + + args := []string{"output", "-no-color", "-json"} + + // string opts: only pass if set + if c.state != "" { + args = append(args, "-state="+c.state) + } + + return tf.buildTerraformCmd(ctx, nil, args...) 
+} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/plan.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/plan.go new file mode 100644 index 000000000..bf41094bb --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/plan.go @@ -0,0 +1,180 @@ +package tfexec + +import ( + "context" + "fmt" + "os/exec" + "strconv" +) + +type planConfig struct { + destroy bool + dir string + lock bool + lockTimeout string + out string + parallelism int + reattachInfo ReattachInfo + refresh bool + replaceAddrs []string + state string + targets []string + vars []string + varFiles []string +} + +var defaultPlanOptions = planConfig{ + destroy: false, + lock: true, + lockTimeout: "0s", + parallelism: 10, + refresh: true, +} + +// PlanOption represents options used in the Plan method. +type PlanOption interface { + configurePlan(*planConfig) +} + +func (opt *DirOption) configurePlan(conf *planConfig) { + conf.dir = opt.path +} + +func (opt *VarFileOption) configurePlan(conf *planConfig) { + conf.varFiles = append(conf.varFiles, opt.path) +} + +func (opt *VarOption) configurePlan(conf *planConfig) { + conf.vars = append(conf.vars, opt.assignment) +} + +func (opt *TargetOption) configurePlan(conf *planConfig) { + conf.targets = append(conf.targets, opt.target) +} + +func (opt *StateOption) configurePlan(conf *planConfig) { + conf.state = opt.path +} + +func (opt *ReattachOption) configurePlan(conf *planConfig) { + conf.reattachInfo = opt.info +} + +func (opt *RefreshOption) configurePlan(conf *planConfig) { + conf.refresh = opt.refresh +} + +func (opt *ReplaceOption) configurePlan(conf *planConfig) { + conf.replaceAddrs = append(conf.replaceAddrs, opt.address) +} + +func (opt *ParallelismOption) configurePlan(conf *planConfig) { + conf.parallelism = opt.parallelism +} + +func (opt *OutOption) configurePlan(conf *planConfig) { + conf.out = opt.path +} + +func (opt *LockTimeoutOption) configurePlan(conf *planConfig) { + conf.lockTimeout = opt.timeout +} + 
+func (opt *LockOption) configurePlan(conf *planConfig) { + conf.lock = opt.lock +} + +func (opt *DestroyFlagOption) configurePlan(conf *planConfig) { + conf.destroy = opt.destroy +} + +// Plan executes `terraform plan` with the specified options and waits for it +// to complete. +// +// The returned boolean is false when the plan diff is empty (no changes) and +// true when the plan diff is non-empty (changes present). +// +// The returned error is nil if `terraform plan` has been executed and exits +// with either 0 or 2. +func (tf *Terraform) Plan(ctx context.Context, opts ...PlanOption) (bool, error) { + cmd, err := tf.planCmd(ctx, opts...) + if err != nil { + return false, err + } + err = tf.runTerraformCmd(ctx, cmd) + if err != nil && cmd.ProcessState.ExitCode() == 2 { + return true, nil + } + return false, err +} + +func (tf *Terraform) planCmd(ctx context.Context, opts ...PlanOption) (*exec.Cmd, error) { + c := defaultPlanOptions + + for _, o := range opts { + o.configurePlan(&c) + } + + args := []string{"plan", "-no-color", "-input=false", "-detailed-exitcode"} + + // string opts: only pass if set + if c.lockTimeout != "" { + args = append(args, "-lock-timeout="+c.lockTimeout) + } + if c.out != "" { + args = append(args, "-out="+c.out) + } + if c.state != "" { + args = append(args, "-state="+c.state) + } + for _, vf := range c.varFiles { + args = append(args, "-var-file="+vf) + } + + // boolean and numerical opts: always pass + args = append(args, "-lock="+strconv.FormatBool(c.lock)) + args = append(args, "-parallelism="+fmt.Sprint(c.parallelism)) + args = append(args, "-refresh="+strconv.FormatBool(c.refresh)) + + // unary flags: pass if true + if c.replaceAddrs != nil { + err := tf.compatible(ctx, tf0_15_2, nil) + if err != nil { + return nil, fmt.Errorf("replace option was introduced in Terraform 0.15.2: %w", err) + } + for _, addr := range c.replaceAddrs { + args = append(args, "-replace="+addr) + } + } + if c.destroy { + args = append(args, 
"-destroy") + } + + // string slice opts: split into separate args + if c.targets != nil { + for _, ta := range c.targets { + args = append(args, "-target="+ta) + } + } + if c.vars != nil { + for _, v := range c.vars { + args = append(args, "-var", v) + } + } + + // optional positional argument + if c.dir != "" { + args = append(args, c.dir) + } + + mergeEnv := map[string]string{} + if c.reattachInfo != nil { + reattachStr, err := c.reattachInfo.marshalString() + if err != nil { + return nil, err + } + mergeEnv[reattachEnvVar] = reattachStr + } + + return tf.buildTerraformCmd(ctx, mergeEnv, args...), nil +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/providers_lock.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/providers_lock.go new file mode 100644 index 000000000..b3a20216d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/providers_lock.go @@ -0,0 +1,82 @@ +package tfexec + +import ( + "context" + "fmt" + "os/exec" +) + +type providersLockConfig struct { + fsMirror string + netMirror string + platforms []string + providers []string +} + +var defaultProvidersLockOptions = providersLockConfig{} + +type ProvidersLockOption interface { + configureProvidersLock(*providersLockConfig) +} + +func (opt *FSMirrorOption) configureProvidersLock(conf *providersLockConfig) { + conf.fsMirror = opt.fsMirror +} + +func (opt *NetMirrorOption) configureProvidersLock(conf *providersLockConfig) { + conf.netMirror = opt.netMirror +} + +func (opt *PlatformOption) configureProvidersLock(conf *providersLockConfig) { + conf.platforms = append(conf.platforms, opt.platform) +} + +func (opt *ProviderOption) configureProvidersLock(conf *providersLockConfig) { + conf.providers = append(conf.providers, opt.provider) +} + +// ProvidersLock represents the `terraform providers lock` command +func (tf *Terraform) ProvidersLock(ctx context.Context, opts ...ProvidersLockOption) error { + err := tf.compatible(ctx, tf0_14_0, nil) + if err != nil { + 
return fmt.Errorf("terraform providers lock was added in 0.14.0: %w", err) + } + + lockCmd := tf.providersLockCmd(ctx, opts...) + + err = tf.runTerraformCmd(ctx, lockCmd) + if err != nil { + return err + } + + return err +} + +func (tf *Terraform) providersLockCmd(ctx context.Context, opts ...ProvidersLockOption) *exec.Cmd { + c := defaultProvidersLockOptions + + for _, o := range opts { + o.configureProvidersLock(&c) + } + args := []string{"providers", "lock"} + + // string options, only pass if set + if c.fsMirror != "" { + args = append(args, "-fs-mirror="+c.fsMirror) + } + + if c.netMirror != "" { + args = append(args, "-net-mirror="+c.netMirror) + } + + for _, p := range c.platforms { + args = append(args, "-platform="+p) + } + + // positional providers argument + for _, p := range c.providers { + args = append(args, p) + } + + return tf.buildTerraformCmd(ctx, nil, args...) +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/providers_schema.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/providers_schema.go new file mode 100644 index 000000000..52efc5db6 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/providers_schema.go @@ -0,0 +1,33 @@ +package tfexec + +import ( + "context" + "os/exec" + + tfjson "github.com/hashicorp/terraform-json" +) + +// ProvidersSchema represents the terraform providers schema -json subcommand. +func (tf *Terraform) ProvidersSchema(ctx context.Context) (*tfjson.ProviderSchemas, error) { + schemaCmd := tf.providersSchemaCmd(ctx) + + var ret tfjson.ProviderSchemas + err := tf.runTerraformCmdJSON(ctx, schemaCmd, &ret) + if err != nil { + return nil, err + } + + err = ret.Validate() + if err != nil { + return nil, err + } + + return &ret, nil +} + +func (tf *Terraform) providersSchemaCmd(ctx context.Context, args ...string) *exec.Cmd { + allArgs := []string{"providers", "schema", "-json", "-no-color"} + allArgs = append(allArgs, args...) + + return tf.buildTerraformCmd(ctx, nil, allArgs...) 
+} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/refresh.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/refresh.go new file mode 100644 index 000000000..78f6b4b50 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/refresh.go @@ -0,0 +1,137 @@ +package tfexec + +import ( + "context" + "os/exec" + "strconv" +) + +type refreshConfig struct { + backup string + dir string + lock bool + lockTimeout string + reattachInfo ReattachInfo + state string + stateOut string + targets []string + vars []string + varFiles []string +} + +var defaultRefreshOptions = refreshConfig{ + lock: true, + lockTimeout: "0s", +} + +// RefreshCmdOption represents options used in the Refresh method. +type RefreshCmdOption interface { + configureRefresh(*refreshConfig) +} + +func (opt *BackupOption) configureRefresh(conf *refreshConfig) { + conf.backup = opt.path +} + +func (opt *DirOption) configureRefresh(conf *refreshConfig) { + conf.dir = opt.path +} + +func (opt *LockOption) configureRefresh(conf *refreshConfig) { + conf.lock = opt.lock +} + +func (opt *LockTimeoutOption) configureRefresh(conf *refreshConfig) { + conf.lockTimeout = opt.timeout +} + +func (opt *ReattachOption) configureRefresh(conf *refreshConfig) { + conf.reattachInfo = opt.info +} + +func (opt *StateOption) configureRefresh(conf *refreshConfig) { + conf.state = opt.path +} + +func (opt *StateOutOption) configureRefresh(conf *refreshConfig) { + conf.stateOut = opt.path +} + +func (opt *TargetOption) configureRefresh(conf *refreshConfig) { + conf.targets = append(conf.targets, opt.target) +} + +func (opt *VarOption) configureRefresh(conf *refreshConfig) { + conf.vars = append(conf.vars, opt.assignment) +} + +func (opt *VarFileOption) configureRefresh(conf *refreshConfig) { + conf.varFiles = append(conf.varFiles, opt.path) +} + +// Refresh represents the terraform refresh subcommand. 
+func (tf *Terraform) Refresh(ctx context.Context, opts ...RefreshCmdOption) error { + cmd, err := tf.refreshCmd(ctx, opts...) + if err != nil { + return err + } + return tf.runTerraformCmd(ctx, cmd) +} + +func (tf *Terraform) refreshCmd(ctx context.Context, opts ...RefreshCmdOption) (*exec.Cmd, error) { + c := defaultRefreshOptions + + for _, o := range opts { + o.configureRefresh(&c) + } + + args := []string{"refresh", "-no-color", "-input=false"} + + // string opts: only pass if set + if c.backup != "" { + args = append(args, "-backup="+c.backup) + } + if c.lockTimeout != "" { + args = append(args, "-lock-timeout="+c.lockTimeout) + } + if c.state != "" { + args = append(args, "-state="+c.state) + } + if c.stateOut != "" { + args = append(args, "-state-out="+c.stateOut) + } + for _, vf := range c.varFiles { + args = append(args, "-var-file="+vf) + } + + // boolean and numerical opts: always pass + args = append(args, "-lock="+strconv.FormatBool(c.lock)) + + // string slice opts: split into separate args + if c.targets != nil { + for _, ta := range c.targets { + args = append(args, "-target="+ta) + } + } + if c.vars != nil { + for _, v := range c.vars { + args = append(args, "-var", v) + } + } + + // optional positional argument + if c.dir != "" { + args = append(args, c.dir) + } + + mergeEnv := map[string]string{} + if c.reattachInfo != nil { + reattachStr, err := c.reattachInfo.marshalString() + if err != nil { + return nil, err + } + mergeEnv[reattachEnvVar] = reattachStr + } + + return tf.buildTerraformCmd(ctx, mergeEnv, args...), nil +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/show.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/show.go new file mode 100644 index 000000000..61e660ac9 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/show.go @@ -0,0 +1,196 @@ +package tfexec + +import ( + "context" + "fmt" + "os/exec" + "strings" + + tfjson "github.com/hashicorp/terraform-json" +) + +type showConfig struct { + 
reattachInfo ReattachInfo +} + +var defaultShowOptions = showConfig{} + +type ShowOption interface { + configureShow(*showConfig) +} + +func (opt *ReattachOption) configureShow(conf *showConfig) { + conf.reattachInfo = opt.info +} + +// Show reads the default state path and outputs the state. +// To read a state or plan file, ShowState or ShowPlan must be used instead. +func (tf *Terraform) Show(ctx context.Context, opts ...ShowOption) (*tfjson.State, error) { + err := tf.compatible(ctx, tf0_12_0, nil) + if err != nil { + return nil, fmt.Errorf("terraform show -json was added in 0.12.0: %w", err) + } + + c := defaultShowOptions + + for _, o := range opts { + o.configureShow(&c) + } + + mergeEnv := map[string]string{} + if c.reattachInfo != nil { + reattachStr, err := c.reattachInfo.marshalString() + if err != nil { + return nil, err + } + mergeEnv[reattachEnvVar] = reattachStr + } + + showCmd := tf.showCmd(ctx, true, mergeEnv) + + var ret tfjson.State + ret.UseJSONNumber(true) + err = tf.runTerraformCmdJSON(ctx, showCmd, &ret) + if err != nil { + return nil, err + } + + err = ret.Validate() + if err != nil { + return nil, err + } + + return &ret, nil +} + +// ShowStateFile reads a given state file and outputs the state. 
+func (tf *Terraform) ShowStateFile(ctx context.Context, statePath string, opts ...ShowOption) (*tfjson.State, error) { + err := tf.compatible(ctx, tf0_12_0, nil) + if err != nil { + return nil, fmt.Errorf("terraform show -json was added in 0.12.0: %w", err) + } + + if statePath == "" { + return nil, fmt.Errorf("statePath cannot be blank: use Show() if not passing statePath") + } + + c := defaultShowOptions + + for _, o := range opts { + o.configureShow(&c) + } + + mergeEnv := map[string]string{} + if c.reattachInfo != nil { + reattachStr, err := c.reattachInfo.marshalString() + if err != nil { + return nil, err + } + mergeEnv[reattachEnvVar] = reattachStr + } + + showCmd := tf.showCmd(ctx, true, mergeEnv, statePath) + + var ret tfjson.State + ret.UseJSONNumber(true) + err = tf.runTerraformCmdJSON(ctx, showCmd, &ret) + if err != nil { + return nil, err + } + + err = ret.Validate() + if err != nil { + return nil, err + } + + return &ret, nil +} + +// ShowPlanFile reads a given plan file and outputs the plan. 
+func (tf *Terraform) ShowPlanFile(ctx context.Context, planPath string, opts ...ShowOption) (*tfjson.Plan, error) { + err := tf.compatible(ctx, tf0_12_0, nil) + if err != nil { + return nil, fmt.Errorf("terraform show -json was added in 0.12.0: %w", err) + } + + if planPath == "" { + return nil, fmt.Errorf("planPath cannot be blank: use Show() if not passing planPath") + } + + c := defaultShowOptions + + for _, o := range opts { + o.configureShow(&c) + } + + mergeEnv := map[string]string{} + if c.reattachInfo != nil { + reattachStr, err := c.reattachInfo.marshalString() + if err != nil { + return nil, err + } + mergeEnv[reattachEnvVar] = reattachStr + } + + showCmd := tf.showCmd(ctx, true, mergeEnv, planPath) + + var ret tfjson.Plan + err = tf.runTerraformCmdJSON(ctx, showCmd, &ret) + if err != nil { + return nil, err + } + + err = ret.Validate() + if err != nil { + return nil, err + } + + return &ret, nil + +} + +// ShowPlanFileRaw reads a given plan file and outputs the plan in a +// human-friendly, opaque format. 
+func (tf *Terraform) ShowPlanFileRaw(ctx context.Context, planPath string, opts ...ShowOption) (string, error) { + if planPath == "" { + return "", fmt.Errorf("planPath cannot be blank: use Show() if not passing planPath") + } + + c := defaultShowOptions + + for _, o := range opts { + o.configureShow(&c) + } + + mergeEnv := map[string]string{} + if c.reattachInfo != nil { + reattachStr, err := c.reattachInfo.marshalString() + if err != nil { + return "", err + } + mergeEnv[reattachEnvVar] = reattachStr + } + + showCmd := tf.showCmd(ctx, false, mergeEnv, planPath) + + var outBuf strings.Builder + showCmd.Stdout = &outBuf + err := tf.runTerraformCmd(ctx, showCmd) + if err != nil { + return "", err + } + + return outBuf.String(), nil + +} + +func (tf *Terraform) showCmd(ctx context.Context, jsonOutput bool, mergeEnv map[string]string, args ...string) *exec.Cmd { + allArgs := []string{"show"} + if jsonOutput { + allArgs = append(allArgs, "-json") + } + allArgs = append(allArgs, "-no-color") + allArgs = append(allArgs, args...) + + return tf.buildTerraformCmd(ctx, mergeEnv, allArgs...) +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/state_mv.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/state_mv.go new file mode 100644 index 000000000..fc7eecf86 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/state_mv.go @@ -0,0 +1,105 @@ +package tfexec + +import ( + "context" + "os/exec" + "strconv" +) + +type stateMvConfig struct { + backup string + backupOut string + dryRun bool + lock bool + lockTimeout string + state string + stateOut string +} + +var defaultStateMvOptions = stateMvConfig{ + lock: true, + lockTimeout: "0s", +} + +// StateMvCmdOption represents options used in the Refresh method. 
+type StateMvCmdOption interface { + configureStateMv(*stateMvConfig) +} + +func (opt *BackupOption) configureStateMv(conf *stateMvConfig) { + conf.backup = opt.path +} + +func (opt *BackupOutOption) configureStateMv(conf *stateMvConfig) { + conf.backupOut = opt.path +} + +func (opt *DryRunOption) configureStateMv(conf *stateMvConfig) { + conf.dryRun = opt.dryRun +} + +func (opt *LockOption) configureStateMv(conf *stateMvConfig) { + conf.lock = opt.lock +} + +func (opt *LockTimeoutOption) configureStateMv(conf *stateMvConfig) { + conf.lockTimeout = opt.timeout +} + +func (opt *StateOption) configureStateMv(conf *stateMvConfig) { + conf.state = opt.path +} + +func (opt *StateOutOption) configureStateMv(conf *stateMvConfig) { + conf.stateOut = opt.path +} + +// StateMv represents the terraform state mv subcommand. +func (tf *Terraform) StateMv(ctx context.Context, source string, destination string, opts ...StateMvCmdOption) error { + cmd, err := tf.stateMvCmd(ctx, source, destination, opts...) 
+ if err != nil { + return err + } + return tf.runTerraformCmd(ctx, cmd) +} + +func (tf *Terraform) stateMvCmd(ctx context.Context, source string, destination string, opts ...StateMvCmdOption) (*exec.Cmd, error) { + c := defaultStateMvOptions + + for _, o := range opts { + o.configureStateMv(&c) + } + + args := []string{"state", "mv", "-no-color"} + + // string opts: only pass if set + if c.backup != "" { + args = append(args, "-backup="+c.backup) + } + if c.backupOut != "" { + args = append(args, "-backup-out="+c.backupOut) + } + if c.lockTimeout != "" { + args = append(args, "-lock-timeout="+c.lockTimeout) + } + if c.state != "" { + args = append(args, "-state="+c.state) + } + if c.stateOut != "" { + args = append(args, "-state-out="+c.stateOut) + } + + // boolean and numerical opts: always pass + args = append(args, "-lock="+strconv.FormatBool(c.lock)) + + // unary flags: pass if true + if c.dryRun { + args = append(args, "-dry-run") + } + + // positional arguments + args = append(args, source) + args = append(args, destination) + + return tf.buildTerraformCmd(ctx, nil, args...), nil +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/state_rm.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/state_rm.go new file mode 100644 index 000000000..0c5dd6667 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/state_rm.go @@ -0,0 +1,104 @@ +package tfexec + +import ( + "context" + "os/exec" + "strconv" +) + +type stateRmConfig struct { + backup string + backupOut string + dryRun bool + lock bool + lockTimeout string + state string + stateOut string +} + +var defaultStateRmOptions = stateRmConfig{ + lock: true, + lockTimeout: "0s", +} + +// StateRmCmdOption represents options used in the Refresh method. 
+type StateRmCmdOption interface { + configureStateRm(*stateRmConfig) +} + +func (opt *BackupOption) configureStateRm(conf *stateRmConfig) { + conf.backup = opt.path +} + +func (opt *BackupOutOption) configureStateRm(conf *stateRmConfig) { + conf.backupOut = opt.path +} + +func (opt *DryRunOption) configureStateRm(conf *stateRmConfig) { + conf.dryRun = opt.dryRun +} + +func (opt *LockOption) configureStateRm(conf *stateRmConfig) { + conf.lock = opt.lock +} + +func (opt *LockTimeoutOption) configureStateRm(conf *stateRmConfig) { + conf.lockTimeout = opt.timeout +} + +func (opt *StateOption) configureStateRm(conf *stateRmConfig) { + conf.state = opt.path +} + +func (opt *StateOutOption) configureStateRm(conf *stateRmConfig) { + conf.stateOut = opt.path +} + +// StateRm represents the terraform state rm subcommand. +func (tf *Terraform) StateRm(ctx context.Context, address string, opts ...StateRmCmdOption) error { + cmd, err := tf.stateRmCmd(ctx, address, opts...) + if err != nil { + return err + } + return tf.runTerraformCmd(ctx, cmd) +} + +func (tf *Terraform) stateRmCmd(ctx context.Context, address string, opts ...StateRmCmdOption) (*exec.Cmd, error) { + c := defaultStateRmOptions + + for _, o := range opts { + o.configureStateRm(&c) + } + + args := []string{"state", "rm", "-no-color"} + + // string opts: only pass if set + if c.backup != "" { + args = append(args, "-backup="+c.backup) + } + if c.backupOut != "" { + args = append(args, "-backup-out="+c.backupOut) + } + if c.lockTimeout != "" { + args = append(args, "-lock-timeout="+c.lockTimeout) + } + if c.state != "" { + args = append(args, "-state="+c.state) + } + if c.stateOut != "" { + args = append(args, "-state-out="+c.stateOut) + } + + // boolean and numerical opts: always pass + args = append(args, "-lock="+strconv.FormatBool(c.lock)) + + // unary flags: pass if true + if c.dryRun { + args = append(args, "-dry-run") + } + + // positional arguments + args = append(args, address) + + return 
tf.buildTerraformCmd(ctx, nil, args...), nil +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/taint.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/taint.go new file mode 100644 index 000000000..cd69df308 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/taint.go @@ -0,0 +1,78 @@ +package tfexec + +import ( + "context" + "fmt" + "os/exec" + "strconv" +) + +type taintConfig struct { + state string + allowMissing bool + lock bool + lockTimeout string +} + +var defaultTaintOptions = taintConfig{ + allowMissing: false, + lock: true, +} + +// TaintOption represents options used in the Taint method. +type TaintOption interface { + configureTaint(*taintConfig) +} + +func (opt *StateOption) configureTaint(conf *taintConfig) { + conf.state = opt.path +} + +func (opt *AllowMissingOption) configureTaint(conf *taintConfig) { + conf.allowMissing = opt.allowMissing +} + +func (opt *LockOption) configureTaint(conf *taintConfig) { + conf.lock = opt.lock +} + +func (opt *LockTimeoutOption) configureTaint(conf *taintConfig) { + conf.lockTimeout = opt.timeout +} + +// Taint represents the terraform taint subcommand. +func (tf *Terraform) Taint(ctx context.Context, address string, opts ...TaintOption) error { + err := tf.compatible(ctx, tf0_4_1, nil) + if err != nil { + return fmt.Errorf("taint was first introduced in Terraform 0.4.1: %w", err) + } + taintCmd := tf.taintCmd(ctx, address, opts...) 
+ return tf.runTerraformCmd(ctx, taintCmd) +} + +func (tf *Terraform) taintCmd(ctx context.Context, address string, opts ...TaintOption) *exec.Cmd { + c := defaultTaintOptions + + for _, o := range opts { + o.configureTaint(&c) + } + + args := []string{"taint", "-no-color"} + + if c.lockTimeout != "" { + args = append(args, "-lock-timeout="+c.lockTimeout) + } + + // string opts: only pass if set + if c.state != "" { + args = append(args, "-state="+c.state) + } + + args = append(args, "-lock="+strconv.FormatBool(c.lock)) + if c.allowMissing { + args = append(args, "-allow-missing") + } + args = append(args, address) + + return tf.buildTerraformCmd(ctx, nil, args...) +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/terraform.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/terraform.go new file mode 100644 index 000000000..2ad143a41 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/terraform.go @@ -0,0 +1,165 @@ +package tfexec + +import ( + "context" + "fmt" + "io" + "io/ioutil" + "log" + "os" + "sync" + + "github.com/hashicorp/go-version" +) + +type printfer interface { + Printf(format string, v ...interface{}) +} + +// Terraform represents the Terraform CLI executable and working directory. +// +// Typically this is constructed against the root module of a Terraform configuration +// but you can override paths used in some commands depending on the available +// options. +// +// All functions that execute CLI commands take a context.Context. It should be noted that +// exec.Cmd.Run will not return context.DeadlineExceeded or context.Canceled by default, we +// have augmented our wrapped errors to respond true to errors.Is for context.DeadlineExceeded +// and context.Canceled if those are present on the context when the error is parsed. See +// https://github.com/golang/go/issues/21880 for more about the Go limitations. 
+// +// By default, the instance inherits the environment from the calling code (using os.Environ) +// but it ignores certain environment variables that are managed within the code and prohibits +// setting them through SetEnv: +// +// - TF_APPEND_USER_AGENT +// - TF_IN_AUTOMATION +// - TF_INPUT +// - TF_LOG +// - TF_LOG_PATH +// - TF_REATTACH_PROVIDERS +// - TF_DISABLE_PLUGIN_TLS +// - TF_SKIP_PROVIDER_VERIFY +type Terraform struct { + execPath string + workingDir string + appendUserAgent string + disablePluginTLS bool + skipProviderVerify bool + env map[string]string + + stdout io.Writer + stderr io.Writer + logger printfer + logPath string + + versionLock sync.Mutex + execVersion *version.Version + provVersions map[string]*version.Version +} + +// NewTerraform returns a Terraform struct with default values for all fields. +// If a blank execPath is supplied, NewTerraform will error. +// Use hc-install or output from os.LookPath to get a desirable execPath. +func NewTerraform(workingDir string, execPath string) (*Terraform, error) { + if workingDir == "" { + return nil, fmt.Errorf("Terraform cannot be initialised with empty workdir") + } + + if _, err := os.Stat(workingDir); err != nil { + return nil, fmt.Errorf("error initialising Terraform with workdir %s: %s", workingDir, err) + } + + if execPath == "" { + err := fmt.Errorf("NewTerraform: please supply the path to a Terraform executable using execPath, e.g. using the github.com/hashicorp/hc-install module.") + return nil, &ErrNoSuitableBinary{ + err: err, + } + } + tf := Terraform{ + execPath: execPath, + workingDir: workingDir, + env: nil, // explicit nil means copy os.Environ + logger: log.New(ioutil.Discard, "", 0), + } + + return &tf, nil +} + +// SetEnv allows you to override environment variables, this should not be used for any well known +// Terraform environment variables that are already covered in options. Pass nil to copy the values +// from os.Environ. 
Attempting to set environment variables that should be managed manually will +// result in ErrManualEnvVar being returned. +func (tf *Terraform) SetEnv(env map[string]string) error { + prohibited := ProhibitedEnv(env) + if len(prohibited) > 0 { + // just error on the first instance + return &ErrManualEnvVar{prohibited[0]} + } + + tf.env = env + return nil +} + +// SetLogger specifies a logger for tfexec to use. +func (tf *Terraform) SetLogger(logger printfer) { + tf.logger = logger +} + +// SetStdout specifies a writer to stream stdout to for every command. +// +// This should be used for information or logging purposes only, not control +// flow. Any parsing necessary should be added as functionality to this package. +func (tf *Terraform) SetStdout(w io.Writer) { + tf.stdout = w +} + +// SetStderr specifies a writer to stream stderr to for every command. +// +// This should be used for information or logging purposes only, not control +// flow. Any parsing necessary should be added as functionality to this package. +func (tf *Terraform) SetStderr(w io.Writer) { + tf.stderr = w +} + +// SetLogPath sets the TF_LOG_PATH environment variable for Terraform CLI +// execution. +func (tf *Terraform) SetLogPath(path string) error { + tf.logPath = path + return nil +} + +// SetAppendUserAgent sets the TF_APPEND_USER_AGENT environment variable for +// Terraform CLI execution. +func (tf *Terraform) SetAppendUserAgent(ua string) error { + tf.appendUserAgent = ua + return nil +} + +// SetDisablePluginTLS sets the TF_DISABLE_PLUGIN_TLS environment variable for +// Terraform CLI execution. +func (tf *Terraform) SetDisablePluginTLS(disabled bool) error { + tf.disablePluginTLS = disabled + return nil +} + +// SetSkipProviderVerify sets the TF_SKIP_PROVIDER_VERIFY environment variable +// for Terraform CLI execution. This is no longer used in 0.13.0 and greater. 
+func (tf *Terraform) SetSkipProviderVerify(skip bool) error { + err := tf.compatible(context.Background(), nil, tf0_13_0) + if err != nil { + return err + } + tf.skipProviderVerify = skip + return nil +} + +// WorkingDir returns the working directory for Terraform. +func (tf *Terraform) WorkingDir() string { + return tf.workingDir +} + +// ExecPath returns the path to the Terraform executable. +func (tf *Terraform) ExecPath() string { + return tf.execPath +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/untaint.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/untaint.go new file mode 100644 index 000000000..bda127277 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/untaint.go @@ -0,0 +1,78 @@ +package tfexec + +import ( + "context" + "fmt" + "os/exec" + "strconv" +) + +type untaintConfig struct { + state string + allowMissing bool + lock bool + lockTimeout string +} + +var defaultUntaintOptions = untaintConfig{ + allowMissing: false, + lock: true, +} + +// OutputOption represents options used in the Output method. +type UntaintOption interface { + configureUntaint(*untaintConfig) +} + +func (opt *StateOption) configureUntaint(conf *untaintConfig) { + conf.state = opt.path +} + +func (opt *AllowMissingOption) configureUntaint(conf *untaintConfig) { + conf.allowMissing = opt.allowMissing +} + +func (opt *LockOption) configureUntaint(conf *untaintConfig) { + conf.lock = opt.lock +} + +func (opt *LockTimeoutOption) configureUntaint(conf *untaintConfig) { + conf.lockTimeout = opt.timeout +} + +// Untaint represents the terraform untaint subcommand. +func (tf *Terraform) Untaint(ctx context.Context, address string, opts ...UntaintOption) error { + err := tf.compatible(ctx, tf0_6_13, nil) + if err != nil { + return fmt.Errorf("untaint was first introduced in Terraform 0.6.13: %w", err) + } + untaintCmd := tf.untaintCmd(ctx, address, opts...) 
+ return tf.runTerraformCmd(ctx, untaintCmd) +} + +func (tf *Terraform) untaintCmd(ctx context.Context, address string, opts ...UntaintOption) *exec.Cmd { + c := defaultUntaintOptions + + for _, o := range opts { + o.configureUntaint(&c) + } + + args := []string{"untaint", "-no-color"} + + if c.lockTimeout != "" { + args = append(args, "-lock-timeout="+c.lockTimeout) + } + + // string opts: only pass if set + if c.state != "" { + args = append(args, "-state="+c.state) + } + + args = append(args, "-lock="+strconv.FormatBool(c.lock)) + if c.allowMissing { + args = append(args, "-allow-missing") + } + args = append(args, address) + + return tf.buildTerraformCmd(ctx, nil, args...) +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/upgrade012.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/upgrade012.go new file mode 100644 index 000000000..e55237a7b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/upgrade012.go @@ -0,0 +1,80 @@ +package tfexec + +import ( + "context" + "fmt" + "os/exec" +) + +type upgrade012Config struct { + dir string + force bool + + reattachInfo ReattachInfo +} + +var defaultUpgrade012Options = upgrade012Config{ + force: false, +} + +// Upgrade012Option represents options used in the Destroy method. +type Upgrade012Option interface { + configureUpgrade012(*upgrade012Config) +} + +func (opt *DirOption) configureUpgrade012(conf *upgrade012Config) { + conf.dir = opt.path +} + +func (opt *ForceOption) configureUpgrade012(conf *upgrade012Config) { + conf.force = opt.force +} + +func (opt *ReattachOption) configureUpgrade012(conf *upgrade012Config) { + conf.reattachInfo = opt.info +} + +// Upgrade012 represents the terraform 0.12upgrade subcommand. +func (tf *Terraform) Upgrade012(ctx context.Context, opts ...Upgrade012Option) error { + cmd, err := tf.upgrade012Cmd(ctx, opts...) 
+ if err != nil { + return err + } + return tf.runTerraformCmd(ctx, cmd) +} + +func (tf *Terraform) upgrade012Cmd(ctx context.Context, opts ...Upgrade012Option) (*exec.Cmd, error) { + err := tf.compatible(ctx, tf0_12_0, tf0_13_0) + if err != nil { + return nil, fmt.Errorf("terraform 0.12upgrade is only supported in 0.12 releases: %w", err) + } + + c := defaultUpgrade012Options + + for _, o := range opts { + o.configureUpgrade012(&c) + } + + args := []string{"0.12upgrade", "-no-color", "-yes"} + + // boolean opts: only pass if set + if c.force { + args = append(args, "-force") + } + + // optional positional argument + if c.dir != "" { + args = append(args, c.dir) + } + + mergeEnv := map[string]string{} + if c.reattachInfo != nil { + reattachStr, err := c.reattachInfo.marshalString() + if err != nil { + return nil, err + } + mergeEnv[reattachEnvVar] = reattachStr + } + + return tf.buildTerraformCmd(ctx, mergeEnv, args...), nil +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/upgrade013.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/upgrade013.go new file mode 100644 index 000000000..f1f444e2f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/upgrade013.go @@ -0,0 +1,68 @@ +package tfexec + +import ( + "context" + "fmt" + "os/exec" +) + +type upgrade013Config struct { + dir string + + reattachInfo ReattachInfo +} + +var defaultUpgrade013Options = upgrade013Config{} + +// Upgrade013Option represents options used in the Destroy method. +type Upgrade013Option interface { + configureUpgrade013(*upgrade013Config) +} + +func (opt *DirOption) configureUpgrade013(conf *upgrade013Config) { + conf.dir = opt.path +} + +func (opt *ReattachOption) configureUpgrade013(conf *upgrade013Config) { + conf.reattachInfo = opt.info +} + +// Upgrade013 represents the terraform 0.13upgrade subcommand. +func (tf *Terraform) Upgrade013(ctx context.Context, opts ...Upgrade013Option) error { + cmd, err := tf.upgrade013Cmd(ctx, opts...) 
+ if err != nil { + return err + } + return tf.runTerraformCmd(ctx, cmd) +} + +func (tf *Terraform) upgrade013Cmd(ctx context.Context, opts ...Upgrade013Option) (*exec.Cmd, error) { + err := tf.compatible(ctx, tf0_13_0, tf0_14_0) + if err != nil { + return nil, fmt.Errorf("terraform 0.13upgrade is only supported in 0.13 releases: %w", err) + } + + c := defaultUpgrade013Options + + for _, o := range opts { + o.configureUpgrade013(&c) + } + + args := []string{"0.13upgrade", "-no-color", "-yes"} + + // optional positional argument + if c.dir != "" { + args = append(args, c.dir) + } + + mergeEnv := map[string]string{} + if c.reattachInfo != nil { + reattachStr, err := c.reattachInfo.marshalString() + if err != nil { + return nil, err + } + mergeEnv[reattachEnvVar] = reattachStr + } + + return tf.buildTerraformCmd(ctx, mergeEnv, args...), nil +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/validate.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/validate.go new file mode 100644 index 000000000..320011df1 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/validate.go @@ -0,0 +1,44 @@ +package tfexec + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + + tfjson "github.com/hashicorp/terraform-json" +) + +// Validate represents the validate subcommand to the Terraform CLI. The -json +// flag support was added in 0.12.0, so this will not work on earlier versions. 
+func (tf *Terraform) Validate(ctx context.Context) (*tfjson.ValidateOutput, error) { + err := tf.compatible(ctx, tf0_12_0, nil) + if err != nil { + return nil, fmt.Errorf("terraform validate -json was added in 0.12.0: %w", err) + } + + cmd := tf.buildTerraformCmd(ctx, nil, "validate", "-no-color", "-json") + + var outBuf = bytes.Buffer{} + cmd.Stdout = &outBuf + + err = tf.runTerraformCmd(ctx, cmd) + // TODO: this command should not exit 1 if you pass -json as its hard to differentiate other errors + if err != nil && cmd.ProcessState.ExitCode() != 1 { + return nil, err + } + + var ret tfjson.ValidateOutput + // TODO: ret.UseJSONNumber(true) validate output should support JSON numbers + jsonErr := json.Unmarshal(outBuf.Bytes(), &ret) + if jsonErr != nil { + // the original call was possibly bad, if it has an error, actually just return that + if err != nil { + return nil, err + } + + return nil, jsonErr + } + + return &ret, nil +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/version.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/version.go new file mode 100644 index 000000000..9978ae28d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/version.go @@ -0,0 +1,208 @@ +package tfexec + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "regexp" + "strconv" + "strings" + + "github.com/hashicorp/go-version" + tfjson "github.com/hashicorp/terraform-json" +) + +var ( + tf0_4_1 = version.Must(version.NewVersion("0.4.1")) + tf0_5_0 = version.Must(version.NewVersion("0.5.0")) + tf0_6_13 = version.Must(version.NewVersion("0.6.13")) + tf0_7_7 = version.Must(version.NewVersion("0.7.7")) + tf0_8_0 = version.Must(version.NewVersion("0.8.0")) + tf0_10_0 = version.Must(version.NewVersion("0.10.0")) + tf0_12_0 = version.Must(version.NewVersion("0.12.0")) + tf0_13_0 = version.Must(version.NewVersion("0.13.0")) + tf0_14_0 = version.Must(version.NewVersion("0.14.0")) + tf0_15_0 = version.Must(version.NewVersion("0.15.0")) + tf0_15_2 
= version.Must(version.NewVersion("0.15.2")) + tf1_1_0 = version.Must(version.NewVersion("1.1.0")) +) + +// Version returns structured output from the terraform version command including both the Terraform CLI version +// and any initialized provider versions. This will read cached values when present unless the skipCache parameter +// is set to true. +func (tf *Terraform) Version(ctx context.Context, skipCache bool) (tfVersion *version.Version, providerVersions map[string]*version.Version, err error) { + tf.versionLock.Lock() + defer tf.versionLock.Unlock() + + if tf.execVersion == nil || skipCache { + tf.execVersion, tf.provVersions, err = tf.version(ctx) + if err != nil { + return nil, nil, err + } + } + + return tf.execVersion, tf.provVersions, nil +} + +// version does not use the locking on the Terraform instance and should probably not be used directly, prefer Version. +func (tf *Terraform) version(ctx context.Context) (*version.Version, map[string]*version.Version, error) { + versionCmd := tf.buildTerraformCmd(ctx, nil, "version", "-json") + + var outBuf bytes.Buffer + versionCmd.Stdout = &outBuf + + err := tf.runTerraformCmd(ctx, versionCmd) + if err != nil { + return nil, nil, err + } + + tfVersion, providerVersions, err := parseJsonVersionOutput(outBuf.Bytes()) + if err != nil { + if _, ok := err.(*json.SyntaxError); ok { + return tf.versionFromPlaintext(ctx) + } + } + + return tfVersion, providerVersions, err +} + +func parseJsonVersionOutput(stdout []byte) (*version.Version, map[string]*version.Version, error) { + var out tfjson.VersionOutput + err := json.Unmarshal(stdout, &out) + if err != nil { + return nil, nil, err + } + + tfVersion, err := version.NewVersion(out.Version) + if err != nil { + return nil, nil, fmt.Errorf("unable to parse version %q: %w", out.Version, err) + } + + providerVersions := make(map[string]*version.Version, 0) + for provider, versionStr := range out.ProviderSelections { + v, err := version.NewVersion(versionStr) + if err != 
nil { + return nil, nil, fmt.Errorf("unable to parse %q version %q: %w", + provider, versionStr, err) + } + providerVersions[provider] = v + } + + return tfVersion, providerVersions, nil +} + +func (tf *Terraform) versionFromPlaintext(ctx context.Context) (*version.Version, map[string]*version.Version, error) { + versionCmd := tf.buildTerraformCmd(ctx, nil, "version") + + var outBuf strings.Builder + versionCmd.Stdout = &outBuf + + err := tf.runTerraformCmd(ctx, versionCmd) + if err != nil { + return nil, nil, err + } + + tfVersion, providerVersions, err := parsePlaintextVersionOutput(outBuf.String()) + if err != nil { + return nil, nil, fmt.Errorf("unable to parse version: %w", err) + } + + return tfVersion, providerVersions, nil +} + +var ( + simpleVersionRe = `v?(?P[0-9]+(?:\.[0-9]+)*(?:-[A-Za-z0-9\.]+)?)` + + versionOutputRe = regexp.MustCompile(`Terraform ` + simpleVersionRe) + providerVersionOutputRe = regexp.MustCompile(`(\n\+ provider[\. ](?P\S+) ` + simpleVersionRe + `)`) +) + +func parsePlaintextVersionOutput(stdout string) (*version.Version, map[string]*version.Version, error) { + stdout = strings.TrimSpace(stdout) + + submatches := versionOutputRe.FindStringSubmatch(stdout) + if len(submatches) != 2 { + return nil, nil, fmt.Errorf("unexpected number of version matches %d for %s", len(submatches), stdout) + } + v, err := version.NewVersion(submatches[1]) + if err != nil { + return nil, nil, fmt.Errorf("unable to parse version %q: %w", submatches[1], err) + } + + allSubmatches := providerVersionOutputRe.FindAllStringSubmatch(stdout, -1) + provV := map[string]*version.Version{} + + for _, submatches := range allSubmatches { + if len(submatches) != 4 { + return nil, nil, fmt.Errorf("unexpected number of provider version matches %d for %s", len(submatches), stdout) + } + + v, err := version.NewVersion(submatches[3]) + if err != nil { + return nil, nil, fmt.Errorf("unable to parse provider version %q: %w", submatches[3], err) + } + + provV[submatches[2]] = v 
+ } + + return v, provV, err +} + +func errorVersionString(v *version.Version) string { + if v == nil { + return "-" + } + return v.String() +} + +// compatible asserts compatibility of the cached terraform version with the executable, and returns a well known error if not. +func (tf *Terraform) compatible(ctx context.Context, minInclusive *version.Version, maxExclusive *version.Version) error { + tfv, _, err := tf.Version(ctx, false) + if err != nil { + return err + } + if ok := versionInRange(tfv, minInclusive, maxExclusive); !ok { + return &ErrVersionMismatch{ + MinInclusive: errorVersionString(minInclusive), + MaxExclusive: errorVersionString(maxExclusive), + Actual: errorVersionString(tfv), + } + } + + return nil +} + +func stripPrereleaseAndMeta(v *version.Version) *version.Version { + if v == nil { + return nil + } + segs := []string{} + for _, s := range v.Segments() { + segs = append(segs, strconv.Itoa(s)) + } + vs := strings.Join(segs, ".") + clean, _ := version.NewVersion(vs) + return clean +} + +// versionInRange checks compatibility of the Terraform version. The minimum is inclusive and the max +// is exclusive, equivalent to min <= expected version < max. +// +// Pre-release information is ignored for comparison. 
+func versionInRange(tfv *version.Version, minInclusive *version.Version, maxExclusive *version.Version) bool { + if minInclusive == nil && maxExclusive == nil { + return true + } + tfv = stripPrereleaseAndMeta(tfv) + minInclusive = stripPrereleaseAndMeta(minInclusive) + maxExclusive = stripPrereleaseAndMeta(maxExclusive) + if minInclusive != nil && !tfv.GreaterThanOrEqual(minInclusive) { + return false + } + if maxExclusive != nil && !tfv.LessThan(maxExclusive) { + return false + } + + return true +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_delete.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_delete.go new file mode 100644 index 000000000..526772079 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_delete.go @@ -0,0 +1,81 @@ +package tfexec + +import ( + "context" + "fmt" + "os/exec" + "strconv" +) + +type workspaceDeleteConfig struct { + lock bool + lockTimeout string + force bool +} + +var defaultWorkspaceDeleteOptions = workspaceDeleteConfig{ + lock: true, + lockTimeout: "0s", +} + +// WorkspaceDeleteCmdOption represents options that are applicable to the WorkspaceDelete method. +type WorkspaceDeleteCmdOption interface { + configureWorkspaceDelete(*workspaceDeleteConfig) +} + +func (opt *LockOption) configureWorkspaceDelete(conf *workspaceDeleteConfig) { + conf.lock = opt.lock +} + +func (opt *LockTimeoutOption) configureWorkspaceDelete(conf *workspaceDeleteConfig) { + conf.lockTimeout = opt.timeout +} + +func (opt *ForceOption) configureWorkspaceDelete(conf *workspaceDeleteConfig) { + conf.force = opt.force +} + +// WorkspaceDelete represents the workspace delete subcommand to the Terraform CLI. +func (tf *Terraform) WorkspaceDelete(ctx context.Context, workspace string, opts ...WorkspaceDeleteCmdOption) error { + cmd, err := tf.workspaceDeleteCmd(ctx, workspace, opts...) 
+ if err != nil { + return err + } + return tf.runTerraformCmd(ctx, cmd) +} + +func (tf *Terraform) workspaceDeleteCmd(ctx context.Context, workspace string, opts ...WorkspaceDeleteCmdOption) (*exec.Cmd, error) { + c := defaultWorkspaceDeleteOptions + + for _, o := range opts { + switch o.(type) { + case *LockOption, *LockTimeoutOption: + err := tf.compatible(ctx, tf0_12_0, nil) + if err != nil { + return nil, fmt.Errorf("-lock and -lock-timeout were added to workspace delete in Terraform 0.12: %w", err) + } + } + + o.configureWorkspaceDelete(&c) + } + + args := []string{"workspace", "delete", "-no-color"} + + if c.force { + args = append(args, "-force") + } + if c.lockTimeout != "" && c.lockTimeout != defaultWorkspaceDeleteOptions.lockTimeout { + // only pass if not default, so we don't need to worry about the 0.11 version check + args = append(args, "-lock-timeout="+c.lockTimeout) + } + if !c.lock { + // only pass if false, so we don't need to worry about the 0.11 version check + args = append(args, "-lock="+strconv.FormatBool(c.lock)) + } + + args = append(args, workspace) + + cmd := tf.buildTerraformCmd(ctx, nil, args...) + + return cmd, nil +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_list.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_list.go new file mode 100644 index 000000000..33c0d779b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_list.go @@ -0,0 +1,46 @@ +package tfexec + +import ( + "context" + "strings" +) + +// WorkspaceList represents the workspace list subcommand to the Terraform CLI. 
+func (tf *Terraform) WorkspaceList(ctx context.Context) ([]string, string, error) { + // TODO: [DIR] param option + wlCmd := tf.buildTerraformCmd(ctx, nil, "workspace", "list", "-no-color") + + var outBuf strings.Builder + wlCmd.Stdout = &outBuf + + err := tf.runTerraformCmd(ctx, wlCmd) + if err != nil { + return nil, "", err + } + + ws, current := parseWorkspaceList(outBuf.String()) + + return ws, current, nil +} + +const currentWorkspacePrefix = "* " + +func parseWorkspaceList(stdout string) ([]string, string) { + lines := strings.Split(stdout, "\n") + + current := "" + workspaces := []string{} + for _, line := range lines { + line = strings.TrimSpace(line) + if line == "" { + continue + } + if strings.HasPrefix(line, currentWorkspacePrefix) { + line = strings.TrimPrefix(line, currentWorkspacePrefix) + current = line + } + workspaces = append(workspaces, line) + } + + return workspaces, current +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_new.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_new.go new file mode 100644 index 000000000..2e05ffdb7 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_new.go @@ -0,0 +1,83 @@ +package tfexec + +import ( + "context" + "fmt" + "os/exec" + "strconv" +) + +type workspaceNewConfig struct { + lock bool + lockTimeout string + copyState string +} + +var defaultWorkspaceNewOptions = workspaceNewConfig{ + lock: true, + lockTimeout: "0s", +} + +// WorkspaceNewCmdOption represents options that are applicable to the WorkspaceNew method. 
+type WorkspaceNewCmdOption interface { + configureWorkspaceNew(*workspaceNewConfig) +} + +func (opt *LockOption) configureWorkspaceNew(conf *workspaceNewConfig) { + conf.lock = opt.lock +} + +func (opt *LockTimeoutOption) configureWorkspaceNew(conf *workspaceNewConfig) { + conf.lockTimeout = opt.timeout +} + +func (opt *CopyStateOption) configureWorkspaceNew(conf *workspaceNewConfig) { + conf.copyState = opt.path +} + +// WorkspaceNew represents the workspace new subcommand to the Terraform CLI. +func (tf *Terraform) WorkspaceNew(ctx context.Context, workspace string, opts ...WorkspaceNewCmdOption) error { + cmd, err := tf.workspaceNewCmd(ctx, workspace, opts...) + if err != nil { + return err + } + return tf.runTerraformCmd(ctx, cmd) +} + +func (tf *Terraform) workspaceNewCmd(ctx context.Context, workspace string, opts ...WorkspaceNewCmdOption) (*exec.Cmd, error) { + // TODO: [DIR] param option + + c := defaultWorkspaceNewOptions + + for _, o := range opts { + switch o.(type) { + case *LockOption, *LockTimeoutOption: + err := tf.compatible(ctx, tf0_12_0, nil) + if err != nil { + return nil, fmt.Errorf("-lock and -lock-timeout were added to workspace new in Terraform 0.12: %w", err) + } + } + + o.configureWorkspaceNew(&c) + } + + args := []string{"workspace", "new", "-no-color"} + + if c.lockTimeout != "" && c.lockTimeout != defaultWorkspaceNewOptions.lockTimeout { + // only pass if not default, so we don't need to worry about the 0.11 version check + args = append(args, "-lock-timeout="+c.lockTimeout) + } + if !c.lock { + // only pass if false, so we don't need to worry about the 0.11 version check + args = append(args, "-lock="+strconv.FormatBool(c.lock)) + } + if c.copyState != "" { + args = append(args, "-state="+c.copyState) + } + + args = append(args, workspace) + + cmd := tf.buildTerraformCmd(ctx, nil, args...) 
+ + return cmd, nil +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_select.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_select.go new file mode 100644 index 000000000..5a51330f6 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_select.go @@ -0,0 +1,10 @@ +package tfexec + +import "context" + +// WorkspaceSelect represents the workspace select subcommand to the Terraform CLI. +func (tf *Terraform) WorkspaceSelect(ctx context.Context, workspace string) error { + // TODO: [DIR] param option + + return tf.runTerraformCmd(ctx, tf.buildTerraformCmd(ctx, nil, "workspace", "select", "-no-color", workspace)) +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_show.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_show.go new file mode 100644 index 000000000..7d5a267f1 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_show.go @@ -0,0 +1,35 @@ +package tfexec + +import ( + "context" + "fmt" + "os/exec" + "strings" +) + +// WorkspaceShow represents the workspace show subcommand to the Terraform CLI. 
+func (tf *Terraform) WorkspaceShow(ctx context.Context) (string, error) { + workspaceShowCmd, err := tf.workspaceShowCmd(ctx) + if err != nil { + return "", err + } + + var outBuffer strings.Builder + workspaceShowCmd.Stdout = &outBuffer + + err = tf.runTerraformCmd(ctx, workspaceShowCmd) + if err != nil { + return "", err + } + + return strings.TrimSpace(outBuffer.String()), nil +} + +func (tf *Terraform) workspaceShowCmd(ctx context.Context) (*exec.Cmd, error) { + err := tf.compatible(ctx, tf0_10_0, nil) + if err != nil { + return nil, fmt.Errorf("workspace show was first introduced in Terraform 0.10.0: %w", err) + } + + return tf.buildTerraformCmd(ctx, nil, "workspace", "show", "-no-color"), nil +} diff --git a/vendor/github.com/hashicorp/terraform-json/.gitignore b/vendor/github.com/hashicorp/terraform-json/.gitignore new file mode 100644 index 000000000..15b499b99 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-json/.gitignore @@ -0,0 +1,3 @@ +.terraform +plan.tfplan +terraform.tfstate.backup diff --git a/vendor/github.com/hashicorp/terraform-json/.go-version b/vendor/github.com/hashicorp/terraform-json/.go-version new file mode 100644 index 000000000..e71519696 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-json/.go-version @@ -0,0 +1 @@ +1.16 diff --git a/vendor/github.com/hashicorp/terraform-json/LICENSE b/vendor/github.com/hashicorp/terraform-json/LICENSE new file mode 100644 index 000000000..a612ad981 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-json/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. 
"Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. 
"Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. 
+Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. 
Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. 
However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. 
Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. 
* +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. 
Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/terraform-json/Makefile b/vendor/github.com/hashicorp/terraform-json/Makefile new file mode 100644 index 000000000..bb93c7f9a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-json/Makefile @@ -0,0 +1,21 @@ +GOTOOLS = \ + gotest.tools/gotestsum@latest + +test: tools + gotestsum --format=short-verbose $(TEST) $(TESTARGS) + +generate: + cd testdata && make generate + +modules: + go mod download && go mod verify + +test-circle: + mkdir -p test-results/terraform-json + gotestsum --format=short-verbose --junitfile test-results/terraform-json/results.xml + +tools: + @echo $(GOTOOLS) | xargs -t -n1 go install + go mod tidy + +.PHONY: test generate modules test-circle tools diff --git a/vendor/github.com/hashicorp/terraform-json/README.md b/vendor/github.com/hashicorp/terraform-json/README.md new file mode 100644 index 000000000..fea0ba260 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-json/README.md @@ -0,0 +1,33 @@ +# terraform-json + +[![CircleCI](https://circleci.com/gh/hashicorp/terraform-json/tree/main.svg?style=svg)](https://circleci.com/gh/hashicorp/terraform-json/tree/main) +[![GoDoc](https://godoc.org/github.com/hashicorp/terraform-json?status.svg)](https://godoc.org/github.com/hashicorp/terraform-json) + +This repository houses data types designed to help parse the data produced by +two [Terraform](https://www.terraform.io/) commands: + +* [`terraform show -json`](https://www.terraform.io/docs/commands/show.html#json-output) +* [`terraform providers schema -json`](https://www.terraform.io/docs/commands/providers/schema.html#json) + +While containing mostly data types, there are also a few helpers to 
assist with +working with the data. + +This repository also serves as de facto documentation for the formats produced +by these commands. For more details, see the +[GoDoc](https://godoc.org/github.com/hashicorp/terraform-json). + +## Why a Separate Repository? + +To reduce dependencies on any of Terraform core's internals, we've made a design +decision to make any helpers or libraries that work with the external JSON data +external and not a part of the Terraform GitHub repository itself. + +While Terraform core will change often and be relatively unstable, this library +will see a smaller amount of change. Most of the major changes have already +happened leading up to 0.12, so you can expect this library to only see minor +incremental changes going forward. + +For this reason, `terraform show -json` and `terraform providers schema -json` +is the recommended format for working with Terraform data externally, and as +such, if you require any help working with the data in these formats, or even a +reference of how the JSON is formatted, use this repository. diff --git a/vendor/github.com/hashicorp/terraform-json/action.go b/vendor/github.com/hashicorp/terraform-json/action.go new file mode 100644 index 000000000..51c4c8369 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-json/action.go @@ -0,0 +1,104 @@ +package tfjson + +// Action is a valid action type for a resource change. +// +// Note that a singular Action is not telling of a full resource +// change operation. Certain resource actions, such as replacement, +// are a composite of more than one type. See the Actions type and +// its helpers for more information. +type Action string + +const ( + // ActionNoop denotes a no-op operation. + ActionNoop Action = "no-op" + + // ActionCreate denotes a create operation. + ActionCreate Action = "create" + + // ActionRead denotes a read operation. + ActionRead Action = "read" + + // ActionUpdate denotes an update operation. 
+ ActionUpdate Action = "update" + + // ActionDelete denotes a delete operation. + ActionDelete Action = "delete" +) + +// Actions denotes a valid change type. +type Actions []Action + +// NoOp is true if this set of Actions denotes a no-op. +func (a Actions) NoOp() bool { + if len(a) != 1 { + return false + } + + return a[0] == ActionNoop +} + +// Create is true if this set of Actions denotes creation of a new +// resource. +func (a Actions) Create() bool { + if len(a) != 1 { + return false + } + + return a[0] == ActionCreate +} + +// Read is true if this set of Actions denotes a read operation only. +func (a Actions) Read() bool { + if len(a) != 1 { + return false + } + + return a[0] == ActionRead +} + +// Update is true if this set of Actions denotes an update operation. +func (a Actions) Update() bool { + if len(a) != 1 { + return false + } + + return a[0] == ActionUpdate +} + +// Delete is true if this set of Actions denotes resource removal. +func (a Actions) Delete() bool { + if len(a) != 1 { + return false + } + + return a[0] == ActionDelete +} + +// DestroyBeforeCreate is true if this set of Actions denotes a +// destroy-before-create operation. This is the standard resource +// replacement method. +func (a Actions) DestroyBeforeCreate() bool { + if len(a) != 2 { + return false + } + + return a[0] == ActionDelete && a[1] == ActionCreate +} + +// CreateBeforeDestroy is true if this set of Actions denotes a +// create-before-destroy operation, usually the result of replacement +// to a resource that has the create_before_destroy lifecycle option +// set. +func (a Actions) CreateBeforeDestroy() bool { + if len(a) != 2 { + return false + } + + return a[0] == ActionCreate && a[1] == ActionDelete +} + +// Replace is true if this set of Actions denotes a valid replacement +// operation. 
+func (a Actions) Replace() bool { + return a.DestroyBeforeCreate() || a.CreateBeforeDestroy() +} diff --git a/vendor/github.com/hashicorp/terraform-json/config.go b/vendor/github.com/hashicorp/terraform-json/config.go new file mode 100644 index 000000000..5ebe4bc84 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-json/config.go @@ -0,0 +1,194 @@ +package tfjson + +import ( + "encoding/json" + "errors" +) + +// Config represents the complete configuration source. +type Config struct { + // A map of all provider instances across all modules in the + // configuration. + // + // The index for this field is opaque and should not be parsed. Use + // the individual fields in ProviderConfig to discern actual data + // about the provider such as name, alias, or defined module. + ProviderConfigs map[string]*ProviderConfig `json:"provider_config,omitempty"` + + // The root module in the configuration. Any child modules descend + // off of here. + RootModule *ConfigModule `json:"root_module,omitempty"` +} + +// Validate checks to ensure that the config is present. +func (c *Config) Validate() error { + if c == nil { + return errors.New("config is nil") + } + + return nil +} + +func (c *Config) UnmarshalJSON(b []byte) error { + type rawConfig Config + var config rawConfig + + err := json.Unmarshal(b, &config) + if err != nil { + return err + } + + *c = *(*Config)(&config) + + return c.Validate() +} + +// ProviderConfig describes a provider configuration instance. +type ProviderConfig struct { + // The name of the provider, ie: "aws". + Name string `json:"name,omitempty"` + + // The fully-specified name of the provider, ie: "registry.terraform.io/hashicorp/aws". + FullName string `json:"full_name,omitempty"` + + // The alias of the provider, ie: "us-east-1". + Alias string `json:"alias,omitempty"` + + // The address of the module the provider is declared in. 
+ ModuleAddress string `json:"module_address,omitempty"` + + // Any non-special configuration values in the provider, indexed by + // key. + Expressions map[string]*Expression `json:"expressions,omitempty"` + + // The defined version constraint for this provider. + VersionConstraint string `json:"version_constraint,omitempty"` +} + +// ConfigModule describes a module in Terraform configuration. +type ConfigModule struct { + // The outputs defined in the module. + Outputs map[string]*ConfigOutput `json:"outputs,omitempty"` + + // The resources defined in the module. + Resources []*ConfigResource `json:"resources,omitempty"` + + // Any "module" stanzas within the specific module. + ModuleCalls map[string]*ModuleCall `json:"module_calls,omitempty"` + + // The variables defined in the module. + Variables map[string]*ConfigVariable `json:"variables,omitempty"` +} + +// ConfigOutput defines an output as defined in configuration. +type ConfigOutput struct { + // Indicates whether or not the output was marked as sensitive. + Sensitive bool `json:"sensitive,omitempty"` + + // The defined value of the output. + Expression *Expression `json:"expression,omitempty"` + + // The defined description of this output. + Description string `json:"description,omitempty"` + + // The defined dependencies tied to this output. + DependsOn []string `json:"depends_on,omitempty"` +} + +// ConfigResource is the configuration representation of a resource. +type ConfigResource struct { + // The address of the resource relative to the module that it is + // in. + Address string `json:"address,omitempty"` + + // The resource mode. + Mode ResourceMode `json:"mode,omitempty"` + + // The type of resource, ie: "null_resource" in + // "null_resource.foo". + Type string `json:"type,omitempty"` + + // The name of the resource, ie: "foo" in "null_resource.foo". + Name string `json:"name,omitempty"` + + // An opaque key representing the provider configuration this + // module uses. 
Note that there are more than one circumstance that + // this key will not match what is found in the ProviderConfigs + // field in the root Config structure, and as such should not be + // relied on for that purpose. + ProviderConfigKey string `json:"provider_config_key,omitempty"` + + // The list of provisioner defined for this configuration. This + // will be nil if no providers are defined. + Provisioners []*ConfigProvisioner `json:"provisioners,omitempty"` + + // Any non-special configuration values in the resource, indexed by + // key. + Expressions map[string]*Expression `json:"expressions,omitempty"` + + // The resource's configuration schema version. With access to the + // specific Terraform provider for this resource, this can be used + // to determine the correct schema for the configuration data + // supplied in Expressions. + SchemaVersion uint64 `json:"schema_version"` + + // The expression data for the "count" value in the resource. + CountExpression *Expression `json:"count_expression,omitempty"` + + // The expression data for the "for_each" value in the resource. + ForEachExpression *Expression `json:"for_each_expression,omitempty"` + + // The contents of the "depends_on" config directive, which + // declares explicit dependencies for this resource. + DependsOn []string `json:"depends_on,omitempty"` +} + +// ConfigVariable defines a variable as defined in configuration. +type ConfigVariable struct { + // The defined default value of the variable. + Default interface{} `json:"default,omitempty"` + + // The defined text description of the variable. + Description string `json:"description,omitempty"` + + // Whether the variable is marked as sensitive + Sensitive bool `json:"sensitive,omitempty"` +} + +// ConfigProvisioner describes a provisioner declared in a resource +// configuration. +type ConfigProvisioner struct { + // The type of the provisioner, ie: "local-exec". 
+ Type string `json:"type,omitempty"` + + // Any non-special configuration values in the provisioner, indexed by + // key. + Expressions map[string]*Expression `json:"expressions,omitempty"` +} + +// ModuleCall describes a declared "module" within a configuration. +// It also contains the data for the module itself. +type ModuleCall struct { + // The contents of the "source" field. + Source string `json:"source,omitempty"` + + // Any non-special configuration values in the module, indexed by + // key. + Expressions map[string]*Expression `json:"expressions,omitempty"` + + // The expression data for the "count" value in the module. + CountExpression *Expression `json:"count_expression,omitempty"` + + // The expression data for the "for_each" value in the module. + ForEachExpression *Expression `json:"for_each_expression,omitempty"` + + // The configuration data for the module itself. + Module *ConfigModule `json:"module,omitempty"` + + // The version constraint for modules that come from the registry. + VersionConstraint string `json:"version_constraint,omitempty"` + + // The explicit resource dependencies for the "depends_on" value. + // As it must be a slice of references, Expression is not used. + DependsOn []string `json:"depends_on,omitempty"` +} diff --git a/vendor/github.com/hashicorp/terraform-json/expression.go b/vendor/github.com/hashicorp/terraform-json/expression.go new file mode 100644 index 000000000..8a39face7 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-json/expression.go @@ -0,0 +1,127 @@ +package tfjson + +import "encoding/json" + +type unknownConstantValue struct{} + +// UnknownConstantValue is a singleton type that denotes that a +// constant value is explicitly unknown. This is set during an +// unmarshal when references are found in an expression to help more +// explicitly differentiate between an explicit null and unknown +// value. 
+var UnknownConstantValue = &unknownConstantValue{} + +// Expression describes the format for an individual key in a +// Terraform configuration. +// +// This struct wraps ExpressionData to support custom JSON parsing. +type Expression struct { + *ExpressionData +} + +// ExpressionData describes the format for an individual key in a +// Terraform configuration. +type ExpressionData struct { + // If the *entire* expression is a constant-defined value, this + // will contain the Go representation of the expression's data. + // + // Note that a nil here denotes and explicit null. When a value is + // unknown on part of the value coming from an expression that + // cannot be resolved at parse time, this field will contain + // UnknownConstantValue. + ConstantValue interface{} `json:"constant_value,omitempty"` + + // If any part of the expression contained values that were not + // able to be resolved at parse-time, this will contain a list of + // the referenced identifiers that caused the value to be unknown. + References []string `json:"references,omitempty"` + + // A list of complex objects that were nested in this expression. + // If this value is a nested block in configuration, sometimes + // referred to as a "sub-resource", this field will contain those + // values, and ConstantValue and References will be blank. + NestedBlocks []map[string]*Expression `json:"-"` +} + +// UnmarshalJSON implements json.Unmarshaler for Expression. +func (e *Expression) UnmarshalJSON(b []byte) error { + result := new(ExpressionData) + + // Check to see if this is an array first. If it is, this is more + // than likely a list of nested blocks. 
+ var rawNested []map[string]json.RawMessage + if err := json.Unmarshal(b, &rawNested); err == nil { + result.NestedBlocks, err = unmarshalExpressionBlocks(rawNested) + if err != nil { + return err + } + } else { + // It's a non-nested expression block, parse normally + if err := json.Unmarshal(b, &result); err != nil { + return err + } + + // If References is non-zero, then ConstantValue is unknown. Set + // this explicitly. + if len(result.References) > 0 { + result.ConstantValue = UnknownConstantValue + } + } + + e.ExpressionData = result + return nil +} + +func unmarshalExpressionBlocks(raw []map[string]json.RawMessage) ([]map[string]*Expression, error) { + var result []map[string]*Expression + + for _, rawBlock := range raw { + block := make(map[string]*Expression) + for k, rawExpr := range rawBlock { + var expr *Expression + if err := json.Unmarshal(rawExpr, &expr); err != nil { + return nil, err + } + + block[k] = expr + } + + result = append(result, block) + } + + return result, nil +} + +// MarshalJSON implements json.Marshaler for Expression. 
+func (e *Expression) MarshalJSON() ([]byte, error) { + switch { + case len(e.ExpressionData.NestedBlocks) > 0: + return marshalExpressionBlocks(e.ExpressionData.NestedBlocks) + + case e.ExpressionData.ConstantValue == UnknownConstantValue: + return json.Marshal(&ExpressionData{ + References: e.ExpressionData.References, + }) + } + + return json.Marshal(e.ExpressionData) +} + +func marshalExpressionBlocks(nested []map[string]*Expression) ([]byte, error) { + var rawNested []map[string]json.RawMessage + for _, block := range nested { + rawBlock := make(map[string]json.RawMessage) + for k, expr := range block { + raw, err := json.Marshal(expr) + if err != nil { + return nil, err + } + + rawBlock[k] = raw + } + + rawNested = append(rawNested, rawBlock) + } + + return json.Marshal(rawNested) +} diff --git a/vendor/github.com/hashicorp/terraform-json/plan.go b/vendor/github.com/hashicorp/terraform-json/plan.go new file mode 100644 index 000000000..274006a01 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-json/plan.go @@ -0,0 +1,202 @@ +package tfjson + +import ( + "encoding/json" + "errors" + "fmt" + + "github.com/hashicorp/go-version" +) + +// PlanFormatVersionConstraints defines the versions of the JSON plan format +// that are supported by this package. +var PlanFormatVersionConstraints = ">= 0.1, < 2.0" + +// ResourceMode is a string representation of the resource type found +// in certain fields in the plan. +type ResourceMode string + +const ( + // DataResourceMode is the resource mode for data sources. + DataResourceMode ResourceMode = "data" + + // ManagedResourceMode is the resource mode for managed resources. + ManagedResourceMode ResourceMode = "managed" +) + +// Plan represents the entire contents of an output Terraform plan. +type Plan struct { + // The version of the plan format. This should always match the + // PlanFormatVersion constant in this package, or else an unmarshal + // will be unstable. 
+ FormatVersion string `json:"format_version,omitempty"` + + // The version of Terraform used to make the plan. + TerraformVersion string `json:"terraform_version,omitempty"` + + // The variables set in the root module when creating the plan. + Variables map[string]*PlanVariable `json:"variables,omitempty"` + + // The common state representation of resources within this plan. + // This is a product of the existing state merged with the diff for + // this plan. + PlannedValues *StateValues `json:"planned_values,omitempty"` + + // The change operations for resources and data sources within this + // plan. + ResourceChanges []*ResourceChange `json:"resource_changes,omitempty"` + + // The change operations for outputs within this plan. + OutputChanges map[string]*Change `json:"output_changes,omitempty"` + + // The Terraform state prior to the plan operation. This is the + // same format as PlannedValues, without the current diff merged. + PriorState *State `json:"prior_state,omitempty"` + + // The Terraform configuration used to make the plan. + Config *Config `json:"configuration,omitempty"` + + // RelevantAttributes represents any resource instances and their + // attributes which may have contributed to the planned changes + RelevantAttributes []ResourceAttribute `json:"relevant_attributes,omitempty"` +} + +// ResourceAttribute describes a full path to a resource attribute +type ResourceAttribute struct { + // Resource describes resource instance address (e.g. null_resource.foo) + Resource string `json:"resource"` + // Attribute describes the attribute path using a lossy representation + // of cty.Path. (e.g. ["id"] or ["objects", 0, "val"]). + Attribute []json.RawMessage `json:"attribute"` +} + +// Validate checks to ensure that the plan is present, and the +// version matches the version supported by this library. 
+func (p *Plan) Validate() error { + if p == nil { + return errors.New("plan is nil") + } + + if p.FormatVersion == "" { + return errors.New("unexpected plan input, format version is missing") + } + + constraint, err := version.NewConstraint(PlanFormatVersionConstraints) + if err != nil { + return fmt.Errorf("invalid version constraint: %w", err) + } + + version, err := version.NewVersion(p.FormatVersion) + if err != nil { + return fmt.Errorf("invalid format version %q: %w", p.FormatVersion, err) + } + + if !constraint.Check(version) { + return fmt.Errorf("unsupported plan format version: %q does not satisfy %q", + version, constraint) + } + + return nil +} + +func isStringInSlice(slice []string, s string) bool { + for _, el := range slice { + if el == s { + return true + } + } + return false +} + +func (p *Plan) UnmarshalJSON(b []byte) error { + type rawPlan Plan + var plan rawPlan + + err := json.Unmarshal(b, &plan) + if err != nil { + return err + } + + *p = *(*Plan)(&plan) + + return p.Validate() +} + +// ResourceChange is a description of an individual change action +// that Terraform plans to use to move from the prior state to a new +// state matching the configuration. +type ResourceChange struct { + // The absolute resource address. + Address string `json:"address,omitempty"` + + // The module portion of the above address. Omitted if the instance + // is in the root module. + ModuleAddress string `json:"module_address,omitempty"` + + // The resource mode. + Mode ResourceMode `json:"mode,omitempty"` + + // The resource type, example: "aws_instance" for aws_instance.foo. + Type string `json:"type,omitempty"` + + // The resource name, example: "foo" for aws_instance.foo. + Name string `json:"name,omitempty"` + + // The instance key for any resources that have been created using + // "count" or "for_each". If neither of these apply the key will be + // empty. + // + // This value can be either an integer (int) or a string. 
+ Index interface{} `json:"index,omitempty"` + + // The name of the provider this resource belongs to. This allows + // the provider to be interpreted unambiguously in the unusual + // situation where a provider offers a resource type whose name + // does not start with its own name, such as the "googlebeta" + // provider offering "google_compute_instance". + ProviderName string `json:"provider_name,omitempty"` + + // An identifier used during replacement operations, and can be + // used to identify the exact resource being replaced in state. + DeposedKey string `json:"deposed,omitempty"` + + // The data describing the change that will be made to this object. + Change *Change `json:"change,omitempty"` +} + +// Change is the representation of a proposed change for an object. +type Change struct { + // The action to be carried out by this change. + Actions Actions `json:"actions,omitempty"` + + // Before and After are representations of the object value both + // before and after the action. For create and delete actions, + // either Before or After is unset (respectively). For no-op + // actions, both values will be identical. After will be incomplete + // if there are values within it that won't be known until after + // apply. + Before interface{} `json:"before,"` + After interface{} `json:"after,omitempty"` + + // A deep object of booleans that denotes any values that are + // unknown in a resource. These values were previously referred to + // as "computed" values. + // + // If the value cannot be found in this map, then its value should + // be available within After, so long as the operation supports it. + AfterUnknown interface{} `json:"after_unknown,omitempty"` + + // BeforeSensitive and AfterSensitive are object values with similar + // structure to Before and After, but with all sensitive leaf values + // replaced with true, and all non-sensitive leaf values omitted. 
These + // objects should be combined with Before and After to prevent accidental + // display of sensitive values in user interfaces. + BeforeSensitive interface{} `json:"before_sensitive,omitempty"` + AfterSensitive interface{} `json:"after_sensitive,omitempty"` +} + +// PlanVariable is a top-level variable in the Terraform plan. +type PlanVariable struct { + // The value for this variable at plan time. + Value interface{} `json:"value,omitempty"` +} diff --git a/vendor/github.com/hashicorp/terraform-json/schemas.go b/vendor/github.com/hashicorp/terraform-json/schemas.go new file mode 100644 index 000000000..027224b62 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-json/schemas.go @@ -0,0 +1,281 @@ +package tfjson + +import ( + "encoding/json" + "errors" + "fmt" + + "github.com/hashicorp/go-version" + "github.com/zclconf/go-cty/cty" +) + +// ProviderSchemasFormatVersionConstraints defines the versions of the JSON +// provider schema format that are supported by this package. +var ProviderSchemasFormatVersionConstraints = ">= 0.1, < 2.0" + +// ProviderSchemas represents the schemas of all providers and +// resources in use by the configuration. +type ProviderSchemas struct { + // The version of the plan format. This should always match one of + // ProviderSchemasFormatVersions in this package, or else + // an unmarshal will be unstable. + FormatVersion string `json:"format_version,omitempty"` + + // The schemas for the providers in this configuration, indexed by + // provider type. Aliases are not included, and multiple instances + // of a provider in configuration will be represented by a single + // provider here. + Schemas map[string]*ProviderSchema `json:"provider_schemas,omitempty"` +} + +// Validate checks to ensure that ProviderSchemas is present, and the +// version matches the version supported by this library. 
+func (p *ProviderSchemas) Validate() error { + if p == nil { + return errors.New("provider schema data is nil") + } + + if p.FormatVersion == "" { + return errors.New("unexpected provider schema data, format version is missing") + } + + constraint, err := version.NewConstraint(PlanFormatVersionConstraints) + if err != nil { + return fmt.Errorf("invalid version constraint: %w", err) + } + + version, err := version.NewVersion(p.FormatVersion) + if err != nil { + return fmt.Errorf("invalid format version %q: %w", p.FormatVersion, err) + } + + if !constraint.Check(version) { + return fmt.Errorf("unsupported provider schema format version: %q does not satisfy %q", + version, constraint) + } + + return nil +} + +func (p *ProviderSchemas) UnmarshalJSON(b []byte) error { + type rawSchemas ProviderSchemas + var schemas rawSchemas + + err := json.Unmarshal(b, &schemas) + if err != nil { + return err + } + + *p = *(*ProviderSchemas)(&schemas) + + return p.Validate() +} + +// ProviderSchema is the JSON representation of the schema of an +// entire provider, including the provider configuration and any +// resources and data sources included with the provider. +type ProviderSchema struct { + // The schema for the provider's configuration. + ConfigSchema *Schema `json:"provider,omitempty"` + + // The schemas for any resources in this provider. + ResourceSchemas map[string]*Schema `json:"resource_schemas,omitempty"` + + // The schemas for any data sources in this provider. + DataSourceSchemas map[string]*Schema `json:"data_source_schemas,omitempty"` +} + +// Schema is the JSON representation of a particular schema +// (provider configuration, resources, data sources). +type Schema struct { + // The version of the particular resource schema. + Version uint64 `json:"version"` + + // The root-level block of configuration values. + Block *SchemaBlock `json:"block,omitempty"` +} + +// SchemaDescriptionKind describes the format type for a particular description's field. 
+type SchemaDescriptionKind string + +const ( + // SchemaDescriptionKindPlain indicates a string in plain text format. + SchemaDescriptionKindPlain SchemaDescriptionKind = "plain" + + // SchemaDescriptionKindMarkdown indicates a Markdown string and may need to be + // processed prior to presentation. + SchemaDescriptionKindMarkdown SchemaDescriptionKind = "markdown" +) + +// SchemaBlock represents a nested block within a particular schema. +type SchemaBlock struct { + // The attributes defined at the particular level of this block. + Attributes map[string]*SchemaAttribute `json:"attributes,omitempty"` + + // Any nested blocks within this particular block. + NestedBlocks map[string]*SchemaBlockType `json:"block_types,omitempty"` + + // The description for this block and format of the description. If + // no kind is provided, it can be assumed to be plain text. + Description string `json:"description,omitempty"` + DescriptionKind SchemaDescriptionKind `json:"description_kind,omitempty"` + + // If true, this block is deprecated. + Deprecated bool `json:"deprecated,omitempty"` +} + +// SchemaNestingMode is the nesting mode for a particular nested +// schema block. +type SchemaNestingMode string + +const ( + // SchemaNestingModeSingle denotes single block nesting mode, which + // allows a single block of this specific type only in + // configuration. This is generally the same as list or set types + // with a single-element constraint. + SchemaNestingModeSingle SchemaNestingMode = "single" + + // SchemaNestingModeGroup is similar to SchemaNestingModeSingle in that it + // calls for only a single instance of a given block type with no labels, + // but it additonally guarantees that its result will never be null, + // even if the block is absent, and instead the nested attributes + // and blocks will be treated as absent in that case. 
+ // + // This is useful for the situation where a remote API has a feature that + // is always enabled but has a group of settings related to that feature + // that themselves have default values. By using SchemaNestingModeGroup + // instead of SchemaNestingModeSingle in that case, generated plans will + // show the block as present even when not present in configuration, + // thus allowing any default values within to be displayed to the user. + SchemaNestingModeGroup SchemaNestingMode = "group" + + // SchemaNestingModeList denotes list block nesting mode, which + // allows an ordered list of blocks where duplicates are allowed. + SchemaNestingModeList SchemaNestingMode = "list" + + // SchemaNestingModeSet denotes set block nesting mode, which + // allows an unordered list of blocks where duplicates are + // generally not allowed. What is considered a duplicate is up to + // the rules of the set itself, which may or may not cover all + // fields in the block. + SchemaNestingModeSet SchemaNestingMode = "set" + + // SchemaNestingModeMap denotes map block nesting mode. This + // creates a map of all declared blocks of the block type within + // the parent, keying them on the label supplied in the block + // declaration. This allows for blocks to be declared in the same + // style as resources. + SchemaNestingModeMap SchemaNestingMode = "map" +) + +// SchemaBlockType describes a nested block within a schema. +type SchemaBlockType struct { + // The nesting mode for this block. + NestingMode SchemaNestingMode `json:"nesting_mode,omitempty"` + + // The block data for this block type, including attributes and + // subsequent nested blocks. + Block *SchemaBlock `json:"block,omitempty"` + + // The lower limit on items that can be declared of this block + // type. + MinItems uint64 `json:"min_items,omitempty"` + + // The upper limit on items that can be declared of this block + // type. 
+ MaxItems uint64 `json:"max_items,omitempty"` +} + +// SchemaAttribute describes an attribute within a schema block. +type SchemaAttribute struct { + // The attribute type + // Either AttributeType or AttributeNestedType is set, never both. + AttributeType cty.Type `json:"type,omitempty"` + + // Details about a nested attribute type + // Either AttributeType or AttributeNestedType is set, never both. + AttributeNestedType *SchemaNestedAttributeType `json:"nested_type,omitempty"` + + // The description field for this attribute. If no kind is + // provided, it can be assumed to be plain text. + Description string `json:"description,omitempty"` + DescriptionKind SchemaDescriptionKind `json:"description_kind,omitempty"` + + // If true, this attribute is deprecated. + Deprecated bool `json:"deprecated,omitempty"` + + // If true, this attribute is required - it has to be entered in + // configuration. + Required bool `json:"required,omitempty"` + + // If true, this attribute is optional - it does not need to be + // entered in configuration. + Optional bool `json:"optional,omitempty"` + + // If true, this attribute is computed - it can be set by the + // provider. It may also be set by configuration if Optional is + // true. + Computed bool `json:"computed,omitempty"` + + // If true, this attribute is sensitive and will not be displayed + // in logs. Future versions of Terraform may encrypt or otherwise + // treat these values with greater care than non-sensitive fields. + Sensitive bool `json:"sensitive,omitempty"` +} + +// jsonSchemaAttribute describes an attribute within a schema block +// in a middle-step internal representation before marshalled into +// a more useful SchemaAttribute with cty.Type. +// +// This avoid panic on marshalling cty.NilType (from cty upstream) +// which the default Go marshaller cannot ignore because it's a +// not nil-able struct. 
+type jsonSchemaAttribute struct { + AttributeType json.RawMessage `json:"type,omitempty"` + AttributeNestedType *SchemaNestedAttributeType `json:"nested_type,omitempty"` + Description string `json:"description,omitempty"` + DescriptionKind SchemaDescriptionKind `json:"description_kind,omitempty"` + Deprecated bool `json:"deprecated,omitempty"` + Required bool `json:"required,omitempty"` + Optional bool `json:"optional,omitempty"` + Computed bool `json:"computed,omitempty"` + Sensitive bool `json:"sensitive,omitempty"` +} + +func (as *SchemaAttribute) MarshalJSON() ([]byte, error) { + jsonSa := &jsonSchemaAttribute{ + AttributeNestedType: as.AttributeNestedType, + Description: as.Description, + DescriptionKind: as.DescriptionKind, + Deprecated: as.Deprecated, + Required: as.Required, + Optional: as.Optional, + Computed: as.Computed, + Sensitive: as.Sensitive, + } + if as.AttributeType != cty.NilType { + attrTy, _ := as.AttributeType.MarshalJSON() + jsonSa.AttributeType = attrTy + } + return json.Marshal(jsonSa) +} + +// SchemaNestedAttributeType describes a nested attribute +// which could also be just expressed simply as cty.Object(...), +// cty.List(cty.Object(...)) etc. but this allows tracking additional +// metadata which can help interpreting or validating the data. +type SchemaNestedAttributeType struct { + // A map of nested attributes + Attributes map[string]*SchemaAttribute `json:"attributes,omitempty"` + + // The nesting mode for this attribute. + NestingMode SchemaNestingMode `json:"nesting_mode,omitempty"` + + // The lower limit on number of items that can be declared + // of this attribute type (not applicable to single nesting mode). + MinItems uint64 `json:"min_items,omitempty"` + + // The upper limit on number of items that can be declared + // of this attribute type (not applicable to single nesting mode). 
+ MaxItems uint64 `json:"max_items,omitempty"` +} diff --git a/vendor/github.com/hashicorp/terraform-json/state.go b/vendor/github.com/hashicorp/terraform-json/state.go new file mode 100644 index 000000000..3c3f6a4b0 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-json/state.go @@ -0,0 +1,206 @@ +package tfjson + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + + "github.com/hashicorp/go-version" + "github.com/zclconf/go-cty/cty" +) + +// StateFormatVersionConstraints defines the versions of the JSON state format +// that are supported by this package. +var StateFormatVersionConstraints = ">= 0.1, < 2.0" + +// State is the top-level representation of a Terraform state. +type State struct { + // useJSONNumber opts into the behavior of calling + // json.Decoder.UseNumber prior to decoding the state, which turns + // numbers into json.Numbers instead of float64s. Set it using + // State.UseJSONNumber. + useJSONNumber bool + + // The version of the state format. This should always match the + // StateFormatVersion constant in this package, or else am + // unmarshal will be unstable. + FormatVersion string `json:"format_version,omitempty"` + + // The Terraform version used to make the state. + TerraformVersion string `json:"terraform_version,omitempty"` + + // The values that make up the state. + Values *StateValues `json:"values,omitempty"` +} + +// UseJSONNumber controls whether the State will be decoded using the +// json.Number behavior or the float64 behavior. When b is true, the State will +// represent numbers in StateOutputs as json.Numbers. When b is false, the +// State will represent numbers in StateOutputs as float64s. +func (s *State) UseJSONNumber(b bool) { + s.useJSONNumber = b +} + +// Validate checks to ensure that the state is present, and the +// version matches the version supported by this library. 
+func (s *State) Validate() error { + if s == nil { + return errors.New("state is nil") + } + + if s.FormatVersion == "" { + return errors.New("unexpected state input, format version is missing") + } + + constraint, err := version.NewConstraint(StateFormatVersionConstraints) + if err != nil { + return fmt.Errorf("invalid version constraint: %w", err) + } + + version, err := version.NewVersion(s.FormatVersion) + if err != nil { + return fmt.Errorf("invalid format version %q: %w", s.FormatVersion, err) + } + + if !constraint.Check(version) { + return fmt.Errorf("unsupported state format version: %q does not satisfy %q", + version, constraint) + } + + return nil +} + +func (s *State) UnmarshalJSON(b []byte) error { + type rawState State + var state rawState + + dec := json.NewDecoder(bytes.NewReader(b)) + if s.useJSONNumber { + dec.UseNumber() + } + err := dec.Decode(&state) + if err != nil { + return err + } + + *s = *(*State)(&state) + + return s.Validate() +} + +// StateValues is the common representation of resolved values for both the +// prior state (which is always complete) and the planned new state. +type StateValues struct { + // The Outputs for this common state representation. + Outputs map[string]*StateOutput `json:"outputs,omitempty"` + + // The root module in this state representation. + RootModule *StateModule `json:"root_module,omitempty"` +} + +// StateModule is the representation of a module in the common state +// representation. This can be the root module or a child module. +type StateModule struct { + // All resources or data sources within this module. + Resources []*StateResource `json:"resources,omitempty"` + + // The absolute module address, omitted for the root module. + Address string `json:"address,omitempty"` + + // Any child modules within this module. + ChildModules []*StateModule `json:"child_modules,omitempty"` +} + +// StateResource is the representation of a resource in the common +// state representation. 
+type StateResource struct { + // The absolute resource address. + Address string `json:"address,omitempty"` + + // The resource mode. + Mode ResourceMode `json:"mode,omitempty"` + + // The resource type, example: "aws_instance" for aws_instance.foo. + Type string `json:"type,omitempty"` + + // The resource name, example: "foo" for aws_instance.foo. + Name string `json:"name,omitempty"` + + // The instance key for any resources that have been created using + // "count" or "for_each". If neither of these apply the key will be + // empty. + // + // This value can be either an integer (int) or a string. + Index interface{} `json:"index,omitempty"` + + // The name of the provider this resource belongs to. This allows + // the provider to be interpreted unambiguously in the unusual + // situation where a provider offers a resource type whose name + // does not start with its own name, such as the "googlebeta" + // provider offering "google_compute_instance". + ProviderName string `json:"provider_name,omitempty"` + + // The version of the resource type schema the "values" property + // conforms to. + SchemaVersion uint64 `json:"schema_version,"` + + // The JSON representation of the attribute values of the resource, + // whose structure depends on the resource type schema. Any unknown + // values are omitted or set to null, making them indistinguishable + // from absent values. + AttributeValues map[string]interface{} `json:"values,omitempty"` + + // The JSON representation of the sensitivity of the resource's + // attribute values. Only attributes which are sensitive + // are included in this structure. + SensitiveValues json.RawMessage `json:"sensitive_values,omitempty"` + + // The addresses of the resources that this resource depends on. + DependsOn []string `json:"depends_on,omitempty"` + + // If true, the resource has been marked as tainted and will be + // re-created on the next update. 
+ Tainted bool `json:"tainted,omitempty"` + + // DeposedKey is set if the resource instance has been marked Deposed and + // will be destroyed on the next apply. + DeposedKey string `json:"deposed_key,omitempty"` +} + +// StateOutput represents an output value in a common state +// representation. +type StateOutput struct { + // Whether or not the output was marked as sensitive. + Sensitive bool `json:"sensitive"` + + // The value of the output. + Value interface{} `json:"value,omitempty"` + + // The type of the output. + Type cty.Type `json:"type,omitempty"` +} + +// jsonStateOutput describes an output value in a middle-step internal +// representation before marshalled into a more useful StateOutput with cty.Type. +// +// This avoid panic on marshalling cty.NilType (from cty upstream) +// which the default Go marshaller cannot ignore because it's a +// not nil-able struct. +type jsonStateOutput struct { + Sensitive bool `json:"sensitive"` + Value interface{} `json:"value,omitempty"` + Type json.RawMessage `json:"type,omitempty"` +} + +func (so *StateOutput) MarshalJSON() ([]byte, error) { + jsonSa := &jsonStateOutput{ + Sensitive: so.Sensitive, + Value: so.Value, + } + if so.Type != cty.NilType { + outputType, _ := so.Type.MarshalJSON() + jsonSa.Type = outputType + } + return json.Marshal(jsonSa) +} diff --git a/vendor/github.com/hashicorp/terraform-json/tfjson.go b/vendor/github.com/hashicorp/terraform-json/tfjson.go new file mode 100644 index 000000000..55f9ac444 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-json/tfjson.go @@ -0,0 +1,9 @@ +// Package tfjson is a de-coupled helper library containing types for +// the plan format output by "terraform show -json" command. This +// command is designed for the export of Terraform plan data in +// a format that can be easily processed by tools unrelated to +// Terraform. +// +// This format is stable and should be used over the binary plan data +// whenever possible. 
+package tfjson diff --git a/vendor/github.com/hashicorp/terraform-json/validate.go b/vendor/github.com/hashicorp/terraform-json/validate.go new file mode 100644 index 000000000..97b82d0a9 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-json/validate.go @@ -0,0 +1,149 @@ +package tfjson + +import ( + "encoding/json" + "errors" + "fmt" + + "github.com/hashicorp/go-version" +) + +// ValidateFormatVersionConstraints defines the versions of the JSON +// validate format that are supported by this package. +var ValidateFormatVersionConstraints = ">= 0.1, < 2.0" + +// Pos represents a position in a config file +type Pos struct { + Line int `json:"line"` + Column int `json:"column"` + Byte int `json:"byte"` +} + +// Range represents a range of bytes between two positions +type Range struct { + Filename string `json:"filename"` + Start Pos `json:"start"` + End Pos `json:"end"` +} + +type DiagnosticSeverity string + +// These severities map to the tfdiags.Severity values, plus an explicit +// unknown in case that enum grows without us noticing here. +const ( + DiagnosticSeverityUnknown DiagnosticSeverity = "unknown" + DiagnosticSeverityError DiagnosticSeverity = "error" + DiagnosticSeverityWarning DiagnosticSeverity = "warning" +) + +// Diagnostic represents information to be presented to a user about an +// error or anomaly in parsing or evaluating configuration +type Diagnostic struct { + Severity DiagnosticSeverity `json:"severity,omitempty"` + + Summary string `json:"summary,omitempty"` + Detail string `json:"detail,omitempty"` + Range *Range `json:"range,omitempty"` + + Snippet *DiagnosticSnippet `json:"snippet,omitempty"` +} + +// DiagnosticSnippet represents source code information about the diagnostic. +// It is possible for a diagnostic to have a source (and therefore a range) but +// no source code can be found. In this case, the range field will be present and +// the snippet field will not. 
+type DiagnosticSnippet struct { + // Context is derived from HCL's hcled.ContextString output. This gives a + // high-level summary of the root context of the diagnostic: for example, + // the resource block in which an expression causes an error. + Context *string `json:"context"` + + // Code is a possibly-multi-line string of Terraform configuration, which + // includes both the diagnostic source and any relevant context as defined + // by the diagnostic. + Code string `json:"code"` + + // StartLine is the line number in the source file for the first line of + // the snippet code block. This is not necessarily the same as the value of + // Range.Start.Line, as it is possible to have zero or more lines of + // context source code before the diagnostic range starts. + StartLine int `json:"start_line"` + + // HighlightStartOffset is the character offset into Code at which the + // diagnostic source range starts, which ought to be highlighted as such by + // the consumer of this data. + HighlightStartOffset int `json:"highlight_start_offset"` + + // HighlightEndOffset is the character offset into Code at which the + // diagnostic source range ends. + HighlightEndOffset int `json:"highlight_end_offset"` + + // Values is a sorted slice of expression values which may be useful in + // understanding the source of an error in a complex expression. + Values []DiagnosticExpressionValue `json:"values"` +} + +// DiagnosticExpressionValue represents an HCL traversal string (e.g. +// "var.foo") and a statement about its value while the expression was +// evaluated (e.g. "is a string", "will be known only after apply"). These are +// intended to help the consumer diagnose why an expression caused a diagnostic +// to be emitted. 
+type DiagnosticExpressionValue struct { + Traversal string `json:"traversal"` + Statement string `json:"statement"` +} + +// ValidateOutput represents JSON output from terraform validate +// (available from 0.12 onwards) +type ValidateOutput struct { + FormatVersion string `json:"format_version"` + + Valid bool `json:"valid"` + ErrorCount int `json:"error_count"` + WarningCount int `json:"warning_count"` + Diagnostics []Diagnostic `json:"diagnostics"` +} + +// Validate checks to ensure that data is present, and the +// version matches the version supported by this library. +func (vo *ValidateOutput) Validate() error { + if vo == nil { + return errors.New("validation output is nil") + } + + if vo.FormatVersion == "" { + // The format was not versioned in the past + return nil + } + + constraint, err := version.NewConstraint(ValidateFormatVersionConstraints) + if err != nil { + return fmt.Errorf("invalid version constraint: %w", err) + } + + version, err := version.NewVersion(vo.FormatVersion) + if err != nil { + return fmt.Errorf("invalid format version %q: %w", vo.FormatVersion, err) + } + + if !constraint.Check(version) { + return fmt.Errorf("unsupported validation output format version: %q does not satisfy %q", + version, constraint) + } + + return nil +} + +func (vo *ValidateOutput) UnmarshalJSON(b []byte) error { + type rawOutput ValidateOutput + var schemas rawOutput + + err := json.Unmarshal(b, &schemas) + if err != nil { + return err + } + + *vo = *(*ValidateOutput)(&schemas) + + return vo.Validate() +} diff --git a/vendor/github.com/hashicorp/terraform-json/version.go b/vendor/github.com/hashicorp/terraform-json/version.go new file mode 100644 index 000000000..16f0a853e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-json/version.go @@ -0,0 +1,11 @@ +package tfjson + +// VersionOutput represents output from the version -json command +// added in v0.13 +type VersionOutput struct { + Version string `json:"terraform_version"` + Revision string 
`json:"terraform_revision"` + Platform string `json:"platform,omitempty"` + ProviderSelections map[string]string `json:"provider_selections"` + Outdated bool `json:"terraform_outdated"` +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-docs/LICENSE b/vendor/github.com/hashicorp/terraform-plugin-docs/LICENSE new file mode 100644 index 000000000..a612ad981 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-docs/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. 
"Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. 
Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. 
Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. 
Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. 
However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. 
Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. 
* +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. 
Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/terraform-plugin-docs/cmd/tfplugindocs/main.go b/vendor/github.com/hashicorp/terraform-plugin-docs/cmd/tfplugindocs/main.go new file mode 100644 index 000000000..1bf63bf1a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-docs/cmd/tfplugindocs/main.go @@ -0,0 +1,26 @@ +package main + +import ( + "os" + + "github.com/mattn/go-colorable" + + "github.com/hashicorp/terraform-plugin-docs/internal/cmd" +) + +func main() { + name := "tfplugindocs" + version := name + " Version " + version + if commit != "" { + version += " from commit " + commit + } + + os.Exit(cmd.Run( + name, + version, + os.Args[1:], + os.Stdin, + colorable.NewColorableStdout(), + colorable.NewColorableStderr(), + )) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-docs/cmd/tfplugindocs/version.go b/vendor/github.com/hashicorp/terraform-plugin-docs/cmd/tfplugindocs/version.go new file mode 100644 index 000000000..1e63a05b4 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-docs/cmd/tfplugindocs/version.go @@ -0,0 +1,7 @@ +package main + +var ( + // These vars will be set by goreleaser. 
+ version string = `dev` + commit string = `` +) diff --git a/vendor/github.com/hashicorp/terraform-plugin-docs/internal/cmd/generate.go b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/cmd/generate.go new file mode 100644 index 000000000..a48a94a24 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/cmd/generate.go @@ -0,0 +1,109 @@ +package cmd + +import ( + "flag" + "fmt" + "strings" + + "github.com/hashicorp/terraform-plugin-docs/internal/provider" +) + +type generateCmd struct { + commonCmd + + flagLegacySidebar bool + + flagProviderName string + flagRenderedProviderName string + + flagRenderedWebsiteDir string + flagExamplesDir string + flagWebsiteTmpDir string + flagWebsiteSourceDir string + tfVersion string +} + +func (cmd *generateCmd) Synopsis() string { + return "generates a plugin website from code, templates, and examples for the current directory" +} + +func (cmd *generateCmd) Help() string { + strBuilder := &strings.Builder{} + + longestName := 0 + longestUsage := 0 + cmd.Flags().VisitAll(func(f *flag.Flag) { + if len(f.Name) > longestName { + longestName = len(f.Name) + } + if len(f.Usage) > longestUsage { + longestUsage = len(f.Usage) + } + }) + + strBuilder.WriteString(fmt.Sprintf("\nUsage: tfplugindocs generate []\n\n")) + cmd.Flags().VisitAll(func(f *flag.Flag) { + if f.DefValue != "" { + strBuilder.WriteString(fmt.Sprintf(" --%s %s%s%s (default: %q)\n", + f.Name, + strings.Repeat(" ", longestName-len(f.Name)+2), + f.Usage, + strings.Repeat(" ", longestUsage-len(f.Usage)+2), + f.DefValue, + )) + } else { + strBuilder.WriteString(fmt.Sprintf(" --%s %s%s%s\n", + f.Name, + strings.Repeat(" ", longestName-len(f.Name)+2), + f.Usage, + strings.Repeat(" ", longestUsage-len(f.Usage)+2), + )) + } + }) + strBuilder.WriteString("\n") + + return strBuilder.String() +} + +func (cmd *generateCmd) Flags() *flag.FlagSet { + fs := flag.NewFlagSet("generate", flag.ExitOnError) + fs.BoolVar(&cmd.flagLegacySidebar, 
"legacy-sidebar", false, "generate the legacy .erb sidebar file") + fs.StringVar(&cmd.flagProviderName, "provider-name", "", "provider name, as used in Terraform configurations") + fs.StringVar(&cmd.flagRenderedProviderName, "rendered-provider-name", "", "provider name, as generated in documentation (ex. page titles, ...)") + fs.StringVar(&cmd.flagRenderedWebsiteDir, "rendered-website-dir", "docs", "output directory") + fs.StringVar(&cmd.flagExamplesDir, "examples-dir", "examples", "examples directory") + fs.StringVar(&cmd.flagWebsiteTmpDir, "website-temp-dir", "", "temporary directory (used during generation)") + fs.StringVar(&cmd.flagWebsiteSourceDir, "website-source-dir", "templates", "templates directory") + fs.StringVar(&cmd.tfVersion, "tf-version", "", "terraform binary version to download") + return fs +} + +func (cmd *generateCmd) Run(args []string) int { + fs := cmd.Flags() + err := fs.Parse(args) + if err != nil { + cmd.ui.Error(fmt.Sprintf("unable to parse flags: %s", err)) + return 1 + } + + return cmd.run(cmd.runInternal) +} + +func (cmd *generateCmd) runInternal() error { + err := provider.Generate( + cmd.ui, + cmd.flagLegacySidebar, + cmd.flagProviderName, + cmd.flagRenderedProviderName, + cmd.flagRenderedWebsiteDir, + cmd.flagExamplesDir, + cmd.flagWebsiteTmpDir, + cmd.flagWebsiteSourceDir, + cmd.tfVersion, + ) + if err != nil { + return fmt.Errorf("unable to generate website: %w", err) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-docs/internal/cmd/run.go b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/cmd/run.go new file mode 100644 index 000000000..9438288e4 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/cmd/run.go @@ -0,0 +1,99 @@ +package cmd + +import ( + "fmt" + "io" + "os" + + "github.com/mitchellh/cli" +) + +type commonCmd struct { + ui cli.Ui +} + +func (cmd *commonCmd) run(r func() error) int { + err := r() + if err != nil { + // TODO: unwraps? 
check for special exit code error? + cmd.ui.Error(fmt.Sprintf("Error executing command: %s\n", err)) + os.Exit(1) + } + return 0 +} + +func initCommands(ui cli.Ui) map[string]cli.CommandFactory { + + generateFactory := func() (cli.Command, error) { + return &generateCmd{ + commonCmd: commonCmd{ + ui: ui, + }, + }, nil + } + + defaultFactory := func() (cli.Command, error) { + return &defaultCmd{ + synopsis: "the generate command is run by default", + Command: &generateCmd{ + commonCmd: commonCmd{ + ui: ui, + }, + }, + }, nil + } + + validateFactory := func() (cli.Command, error) { + return &validateCmd{ + commonCmd: commonCmd{ + ui: ui, + }, + }, nil + } + + return map[string]cli.CommandFactory{ + "": defaultFactory, + "generate": generateFactory, + "validate": validateFactory, + //"serve": serveFactory, + } +} + +type defaultCmd struct { + cli.Command + synopsis string +} + +func (cmd *defaultCmd) Synopsis() string { + return cmd.synopsis +} + +func Run(name, version string, args []string, stdin io.Reader, stdout, stderr io.Writer) int { + var ui cli.Ui = &cli.ColoredUi{ + ErrorColor: cli.UiColorRed, + WarnColor: cli.UiColorYellow, + + Ui: &cli.BasicUi{ + Reader: stdin, + Writer: stdout, + ErrorWriter: stderr, + }, + } + + commands := initCommands(ui) + + cli := cli.CLI{ + Name: name, + Args: args, + Commands: commands, + HelpFunc: cli.BasicHelpFunc(name), + HelpWriter: stderr, + Version: version, + } + + exitCode, err := cli.Run() + if err != nil { + return 1 + } + return exitCode +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-docs/internal/cmd/serve.go b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/cmd/serve.go new file mode 100644 index 000000000..4e8f21eed --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/cmd/serve.go @@ -0,0 +1,3 @@ +package cmd + +//TODO: this command can run an emulated server to simulate the registry diff --git 
a/vendor/github.com/hashicorp/terraform-plugin-docs/internal/cmd/validate.go b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/cmd/validate.go new file mode 100644 index 000000000..0f54c5668 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/cmd/validate.go @@ -0,0 +1,80 @@ +package cmd + +import ( + "flag" + "fmt" + "strings" + + "github.com/hashicorp/terraform-plugin-docs/internal/provider" +) + +type validateCmd struct { + commonCmd +} + +func (cmd *validateCmd) Synopsis() string { + return "validates a plugin website for the current directory" +} + +func (cmd *validateCmd) Help() string { + strBuilder := &strings.Builder{} + + longestName := 0 + longestUsage := 0 + cmd.Flags().VisitAll(func(f *flag.Flag) { + if len(f.Name) > longestName { + longestName = len(f.Name) + } + if len(f.Usage) > longestUsage { + longestUsage = len(f.Usage) + } + }) + + strBuilder.WriteString(fmt.Sprintf("\nUsage: tfplugindocs validate []\n\n")) + cmd.Flags().VisitAll(func(f *flag.Flag) { + if f.DefValue != "" { + strBuilder.WriteString(fmt.Sprintf(" --%s %s%s%s (default: %q)\n", + f.Name, + strings.Repeat(" ", longestName-len(f.Name)+2), + f.Usage, + strings.Repeat(" ", longestUsage-len(f.Usage)+2), + f.DefValue, + )) + } else { + strBuilder.WriteString(fmt.Sprintf(" --%s %s%s%s\n", + f.Name, + strings.Repeat(" ", longestName-len(f.Name)+2), + f.Usage, + strings.Repeat(" ", longestUsage-len(f.Usage)+2), + )) + } + }) + strBuilder.WriteString("\n") + + return strBuilder.String() +} + +func (cmd *validateCmd) Flags() *flag.FlagSet { + fs := flag.NewFlagSet("validate", flag.ExitOnError) + return fs +} + +func (cmd *validateCmd) Run(args []string) int { + fs := cmd.Flags() + err := fs.Parse(args) + if err != nil { + cmd.ui.Error(fmt.Sprintf("unable to parse flags: %s", err)) + return 1 + } + + return cmd.run(cmd.runInternal) +} + +func (cmd *validateCmd) runInternal() error { + err := provider.Validate(cmd.ui) + if err != nil { + return 
fmt.Errorf("unable to validate website: %w", err) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-docs/internal/mdplain/mdplain.go b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/mdplain/mdplain.go new file mode 100644 index 000000000..ea2a77894 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/mdplain/mdplain.go @@ -0,0 +1,12 @@ +package mdplain + +import "github.com/russross/blackfriday" + +// Clean runs a VERY naive cleanup of markdown text to make it more palatable as plain text. +func PlainMarkdown(md string) (string, error) { + pt := &Text{} + + html := blackfriday.MarkdownOptions([]byte(md), pt, blackfriday.Options{}) + + return string(html), nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-docs/internal/mdplain/renderer.go b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/mdplain/renderer.go new file mode 100644 index 000000000..6dd958e6b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/mdplain/renderer.go @@ -0,0 +1,218 @@ +package mdplain + +import ( + "bytes" + + "github.com/russross/blackfriday" +) + +type Text struct{} + +func TextRenderer() blackfriday.Renderer { + return &Text{} +} + +func (options *Text) GetFlags() int { + return 0 +} + +func (options *Text) TitleBlock(out *bytes.Buffer, text []byte) { + text = bytes.TrimPrefix(text, []byte("% ")) + text = bytes.Replace(text, []byte("\n% "), []byte("\n"), -1) + out.Write(text) + out.WriteString("\n") +} + +func (options *Text) Header(out *bytes.Buffer, text func() bool, level int, id string) { + marker := out.Len() + doubleSpace(out) + + if !text() { + out.Truncate(marker) + return + } +} + +func (options *Text) BlockHtml(out *bytes.Buffer, text []byte) { + doubleSpace(out) + out.Write(text) + out.WriteByte('\n') +} + +func (options *Text) HRule(out *bytes.Buffer) { + doubleSpace(out) +} + +func (options *Text) BlockCode(out *bytes.Buffer, text []byte, lang string) { + 
options.BlockCodeNormal(out, text, lang) +} + +func (options *Text) BlockCodeNormal(out *bytes.Buffer, text []byte, lang string) { + doubleSpace(out) + out.Write(text) +} + +func (options *Text) BlockQuote(out *bytes.Buffer, text []byte) { + doubleSpace(out) + out.Write(text) +} + +func (options *Text) Table(out *bytes.Buffer, header []byte, body []byte, columnData []int) { + doubleSpace(out) + out.Write(header) + out.Write(body) +} + +func (options *Text) TableRow(out *bytes.Buffer, text []byte) { + doubleSpace(out) + out.Write(text) +} + +func (options *Text) TableHeaderCell(out *bytes.Buffer, text []byte, align int) { + doubleSpace(out) + out.Write(text) +} + +func (options *Text) TableCell(out *bytes.Buffer, text []byte, align int) { + doubleSpace(out) + out.Write(text) +} + +func (options *Text) Footnotes(out *bytes.Buffer, text func() bool) { + options.HRule(out) + options.List(out, text, 0) +} + +func (options *Text) FootnoteItem(out *bytes.Buffer, name, text []byte, flags int) { + out.Write(text) +} + +func (options *Text) List(out *bytes.Buffer, text func() bool, flags int) { + marker := out.Len() + doubleSpace(out) + + if !text() { + out.Truncate(marker) + return + } +} + +func (options *Text) ListItem(out *bytes.Buffer, text []byte, flags int) { + out.Write(text) +} + +func (options *Text) Paragraph(out *bytes.Buffer, text func() bool) { + marker := out.Len() + doubleSpace(out) + + if !text() { + out.Truncate(marker) + return + } +} + +func (options *Text) AutoLink(out *bytes.Buffer, link []byte, kind int) { + out.Write(link) +} + +func (options *Text) CodeSpan(out *bytes.Buffer, text []byte) { + out.Write(text) +} + +func (options *Text) DoubleEmphasis(out *bytes.Buffer, text []byte) { + out.Write(text) +} + +func (options *Text) Emphasis(out *bytes.Buffer, text []byte) { + if len(text) == 0 { + return + } + out.Write(text) +} + +func (options *Text) Image(out *bytes.Buffer, link []byte, title []byte, alt []byte) { + return +} + +func (options *Text) 
LineBreak(out *bytes.Buffer) { + return +} + +func (options *Text) Link(out *bytes.Buffer, link []byte, title []byte, content []byte) { + out.Write(content) + if !isRelativeLink(link) { + out.WriteString(" ") + out.Write(link) + } + return +} + +func (options *Text) RawHtmlTag(out *bytes.Buffer, text []byte) { + return +} + +func (options *Text) TripleEmphasis(out *bytes.Buffer, text []byte) { + out.Write(text) +} + +func (options *Text) StrikeThrough(out *bytes.Buffer, text []byte) { + out.Write(text) +} + +func (options *Text) FootnoteRef(out *bytes.Buffer, ref []byte, id int) { + return +} + +func (options *Text) Entity(out *bytes.Buffer, entity []byte) { + out.Write(entity) +} + +func (options *Text) NormalText(out *bytes.Buffer, text []byte) { + out.Write(text) +} + +func (options *Text) Smartypants(out *bytes.Buffer, text []byte) { + return +} + +func (options *Text) DocumentHeader(out *bytes.Buffer) { + return +} + +func (options *Text) DocumentFooter(out *bytes.Buffer) { + return +} + +func (options *Text) TocHeader(text []byte, level int) { + return +} + +func (options *Text) TocFinalize() { + return +} + +func doubleSpace(out *bytes.Buffer) { + if out.Len() > 0 { + out.WriteByte('\n') + } +} + +func isRelativeLink(link []byte) (yes bool) { + yes = false + + // a tag begin with '#' + if link[0] == '#' { + yes = true + } + + // link begin with '/' but not '//', the second maybe a protocol relative link + if len(link) >= 2 && link[0] == '/' && link[1] != '/' { + yes = true + } + + // only the root '/' + if len(link) == 1 && link[0] == '/' { + yes = true + } + return +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-docs/internal/provider/generate.go b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/provider/generate.go new file mode 100644 index 000000000..f9fed5f8c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/provider/generate.go @@ -0,0 +1,568 @@ +package provider + +import ( + "context" + "fmt" + 
"io/ioutil" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + + "github.com/hashicorp/go-version" + install "github.com/hashicorp/hc-install" + "github.com/hashicorp/hc-install/checkpoint" + "github.com/hashicorp/hc-install/fs" + "github.com/hashicorp/hc-install/product" + "github.com/hashicorp/hc-install/releases" + "github.com/hashicorp/hc-install/src" + "github.com/hashicorp/terraform-exec/tfexec" + tfjson "github.com/hashicorp/terraform-json" + "github.com/mitchellh/cli" +) + +var ( + examplesResourceFileTemplate = resourceFileTemplate("resources/{{.Name}}/resource.tf") + examplesResourceImportTemplate = resourceFileTemplate("resources/{{.Name}}/import.sh") + examplesDataSourceFileTemplate = resourceFileTemplate("data-sources/{{ .Name }}/data-source.tf") + examplesProviderFileTemplate = providerFileTemplate("provider/provider.tf") + + websiteResourceFileTemplate = resourceFileTemplate("resources/{{ .ShortName }}.md.tmpl") + websiteResourceFallbackFileTemplate = resourceFileTemplate("resources.md.tmpl") + websiteResourceFileStatic = []resourceFileTemplate{ + resourceFileTemplate("resources/{{ .ShortName }}.md"), + // TODO: warn for all of these, as they won't render? massage them to the proper output file name? 
+ resourceFileTemplate("resources/{{ .ShortName }}.markdown"), + resourceFileTemplate("resources/{{ .ShortName }}.html.markdown"), + resourceFileTemplate("resources/{{ .ShortName }}.html.md"), + resourceFileTemplate("r/{{ .ShortName }}.markdown"), + resourceFileTemplate("r/{{ .ShortName }}.md"), + resourceFileTemplate("r/{{ .ShortName }}.html.markdown"), + resourceFileTemplate("r/{{ .ShortName }}.html.md"), + } + websiteDataSourceFileTemplate = resourceFileTemplate("data-sources/{{ .ShortName }}.md.tmpl") + websiteDataSourceFallbackFileTemplate = resourceFileTemplate("data-sources.md.tmpl") + websiteDataSourceFileStatic = []resourceFileTemplate{ + resourceFileTemplate("data-sources/{{ .ShortName }}.md"), + // TODO: warn for all of these, as they won't render? massage them to the proper output file name? + resourceFileTemplate("data-sources/{{ .ShortName }}.markdown"), + resourceFileTemplate("data-sources/{{ .ShortName }}.html.markdown"), + resourceFileTemplate("data-sources/{{ .ShortName }}.html.md"), + resourceFileTemplate("d/{{ .ShortName }}.markdown"), + resourceFileTemplate("d/{{ .ShortName }}.md"), + resourceFileTemplate("d/{{ .ShortName }}.html.markdown"), + resourceFileTemplate("d/{{ .ShortName }}.html.md"), + } + websiteProviderFileTemplate = providerFileTemplate("index.md.tmpl") + websiteProviderFileStatic = []providerFileTemplate{ + providerFileTemplate("index.markdown"), + providerFileTemplate("index.md"), + providerFileTemplate("index.html.markdown"), + providerFileTemplate("index.html.md"), + } +) + +type generator struct { + legacySidebar bool + tfVersion string + + providerName string + renderedProviderName string + renderedWebsiteDir string + examplesDir string + websiteTmpDir string + websiteSourceDir string + + ui cli.Ui +} + +func (g *generator) infof(format string, a ...interface{}) { + g.ui.Info(fmt.Sprintf(format, a...)) +} + +func (g *generator) warnf(format string, a ...interface{}) { + g.ui.Warn(fmt.Sprintf(format, a...)) +} + +func 
Generate(ui cli.Ui, legacySidebar bool, providerName, renderedProviderName, renderedWebsiteDir, examplesDir, websiteTmpDir, websiteSourceDir, tfVersion string) error { + g := &generator{ + legacySidebar: legacySidebar, + tfVersion: tfVersion, + + providerName: providerName, + renderedProviderName: renderedProviderName, + renderedWebsiteDir: renderedWebsiteDir, + examplesDir: examplesDir, + websiteTmpDir: websiteTmpDir, + websiteSourceDir: websiteSourceDir, + + ui: ui, + } + + ctx := context.Background() + + return g.Generate(ctx) +} + +func (g *generator) Generate(ctx context.Context) error { + var err error + + wd, err := os.Getwd() + if err != nil { + return err + } + + providerName := g.providerName + if g.providerName == "" { + providerName = filepath.Base(wd) + } + + if g.renderedProviderName == "" { + g.renderedProviderName = providerName + } + + g.infof("rendering website for provider %q (as %q)", providerName, g.renderedProviderName) + + switch { + case g.websiteTmpDir == "": + g.websiteTmpDir, err = ioutil.TempDir("", "tfws") + if err != nil { + return err + } + defer os.RemoveAll(g.websiteTmpDir) + default: + g.infof("cleaning tmp dir %q", g.websiteTmpDir) + err = os.RemoveAll(g.websiteTmpDir) + if err != nil { + return err + } + + g.infof("creating tmp dir %q", g.websiteTmpDir) + err = os.MkdirAll(g.websiteTmpDir, 0755) + if err != nil { + return err + } + } + + websiteSourceDirInfo, err := os.Stat(g.websiteSourceDir) + switch { + case os.IsNotExist(err): + // do nothing, no template dir + case err != nil: + return err + default: + if !websiteSourceDirInfo.IsDir() { + return fmt.Errorf("template path is not a directory: %s", g.websiteSourceDir) + } + + g.infof("copying any existing content to tmp dir") + err = cp(g.websiteSourceDir, filepath.Join(g.websiteTmpDir, "templates")) + if err != nil { + return err + } + } + + g.infof("exporting schema from Terraform") + providerSchema, err := g.terraformProviderSchema(ctx, providerName) + if err != nil { + 
return err + } + + g.infof("rendering missing docs") + err = g.renderMissingDocs(providerName, providerSchema) + if err != nil { + return err + } + + g.infof("rendering static website") + err = g.renderStaticWebsite(providerName, providerSchema) + if err != nil { + return err + } + + // TODO: may not ever need this, unsure on when this will go live + if g.legacySidebar { + g.infof("rendering legacy sidebar...") + g.warnf("TODO...!") + } + + return nil +} + +func (g *generator) renderMissingResourceDoc(providerName, name, typeName string, schema *tfjson.Schema, websiteFileTemplate resourceFileTemplate, fallbackWebsiteFileTemplate resourceFileTemplate, websiteStaticCandidateTemplates []resourceFileTemplate, examplesFileTemplate resourceFileTemplate, examplesImportTemplate *resourceFileTemplate) error { + tmplPath, err := websiteFileTemplate.Render(name, providerName) + if err != nil { + return fmt.Errorf("unable to render path for resource %q: %w", name, err) + } + tmplPath = filepath.Join(g.websiteTmpDir, g.websiteSourceDir, tmplPath) + if fileExists(tmplPath) { + g.infof("resource %q template exists, skipping", name) + return nil + } + + for _, candidate := range websiteStaticCandidateTemplates { + candidatePath, err := candidate.Render(name, providerName) + if err != nil { + return fmt.Errorf("unable to render path for resource %q: %w", name, err) + } + candidatePath = filepath.Join(g.websiteTmpDir, g.websiteSourceDir, candidatePath) + if fileExists(candidatePath) { + g.infof("resource %q static file exists, skipping", name) + return nil + } + } + + examplePath, err := examplesFileTemplate.Render(name, providerName) + if err != nil { + return fmt.Errorf("unable to render example file path for %q: %w", name, err) + } + if examplePath != "" { + examplePath = filepath.Join(g.examplesDir, examplePath) + } + if !fileExists(examplePath) { + examplePath = "" + } + + importPath := "" + if examplesImportTemplate != nil { + importPath, err = 
examplesImportTemplate.Render(name, providerName) + if err != nil { + return fmt.Errorf("unable to render example import file path for %q: %w", name, err) + } + if importPath != "" { + importPath = filepath.Join(g.examplesDir, importPath) + } + if !fileExists(importPath) { + importPath = "" + } + } + + targetResourceTemplate := defaultResourceTemplate + + fallbackTmplPath, err := fallbackWebsiteFileTemplate.Render(name, providerName) + if err != nil { + return fmt.Errorf("unable to render path for resource %q: %w", name, err) + } + fallbackTmplPath = filepath.Join(g.websiteTmpDir, g.websiteSourceDir, fallbackTmplPath) + if fileExists(fallbackTmplPath) { + g.infof("resource %q fallback template exists", name) + tmplData, err := ioutil.ReadFile(fallbackTmplPath) + if err != nil { + return fmt.Errorf("unable to read file %q: %w", fallbackTmplPath, err) + } + targetResourceTemplate = resourceTemplate(tmplData) + } + + g.infof("generating template for %q", name) + md, err := targetResourceTemplate.Render(name, providerName, g.renderedProviderName, typeName, examplePath, importPath, schema) + if err != nil { + return fmt.Errorf("unable to render template for %q: %w", name, err) + } + + err = writeFile(tmplPath, md) + if err != nil { + return fmt.Errorf("unable to write file %q: %w", tmplPath, err) + } + + return nil +} + +func (g *generator) renderMissingProviderDoc(providerName string, schema *tfjson.Schema, websiteFileTemplate providerFileTemplate, websiteStaticCandidateTemplates []providerFileTemplate, examplesFileTemplate providerFileTemplate) error { + tmplPath, err := websiteFileTemplate.Render(providerName) + if err != nil { + return fmt.Errorf("unable to render path for provider %q: %w", providerName, err) + } + tmplPath = filepath.Join(g.websiteTmpDir, g.websiteSourceDir, tmplPath) + if fileExists(tmplPath) { + g.infof("provider %q template exists, skipping", providerName) + return nil + } + + for _, candidate := range websiteStaticCandidateTemplates { + 
candidatePath, err := candidate.Render(providerName) + if err != nil { + return fmt.Errorf("unable to render path for provider %q: %w", providerName, err) + } + candidatePath = filepath.Join(g.websiteTmpDir, g.websiteSourceDir, candidatePath) + if fileExists(candidatePath) { + g.infof("provider %q static file exists, skipping", providerName) + return nil + } + } + + examplePath, err := examplesFileTemplate.Render(providerName) + if err != nil { + return fmt.Errorf("unable to render example file path for %q: %w", providerName, err) + } + if examplePath != "" { + examplePath = filepath.Join(g.examplesDir, examplePath) + } + if !fileExists(examplePath) { + examplePath = "" + } + + g.infof("generating template for %q", providerName) + md, err := defaultProviderTemplate.Render(providerName, g.renderedProviderName, examplePath, schema) + if err != nil { + return fmt.Errorf("unable to render template for %q: %w", providerName, err) + } + + err = writeFile(tmplPath, md) + if err != nil { + return fmt.Errorf("unable to write file %q: %w", tmplPath, err) + } + + return nil +} + +func (g *generator) renderMissingDocs(providerName string, providerSchema *tfjson.ProviderSchema) error { + g.infof("generating missing resource content") + for name, schema := range providerSchema.ResourceSchemas { + err := g.renderMissingResourceDoc(providerName, name, "Resource", schema, + websiteResourceFileTemplate, + websiteResourceFallbackFileTemplate, + websiteResourceFileStatic, + examplesResourceFileTemplate, + &examplesResourceImportTemplate) + if err != nil { + return fmt.Errorf("unable to render doc %q: %w", name, err) + } + } + + g.infof("generating missing data source content") + for name, schema := range providerSchema.DataSourceSchemas { + err := g.renderMissingResourceDoc(providerName, name, "Data Source", schema, + websiteDataSourceFileTemplate, + websiteDataSourceFallbackFileTemplate, + websiteDataSourceFileStatic, + examplesDataSourceFileTemplate, + nil) + if err != nil { + 
return fmt.Errorf("unable to render doc %q: %w", name, err) + } + } + + g.infof("generating missing provider content") + err := g.renderMissingProviderDoc(providerName, providerSchema.ConfigSchema, + websiteProviderFileTemplate, + websiteProviderFileStatic, + examplesProviderFileTemplate, + ) + if err != nil { + return fmt.Errorf("unable to render provider doc: %w", err) + } + + return nil +} + +func (g *generator) renderStaticWebsite(providerName string, providerSchema *tfjson.ProviderSchema) error { + g.infof("cleaning rendered website dir") + err := os.RemoveAll(g.renderedWebsiteDir) + if err != nil { + return err + } + + shortName := providerShortName(providerName) + + g.infof("rendering templated website to static markdown") + + err = filepath.Walk(g.websiteTmpDir, func(path string, info os.FileInfo, err error) error { + if info.IsDir() { + // skip directories + return nil + } + + rel, err := filepath.Rel(filepath.Join(g.websiteTmpDir, g.websiteSourceDir), path) + if err != nil { + return err + } + + relDir, relFile := filepath.Split(rel) + relDir = filepath.ToSlash(relDir) + + // skip special top-level generic resource and data source templates + if relDir == "" && (relFile == "resources.md.tmpl" || relFile == "data-sources.md.tmpl") { + return nil + } + + renderedPath := filepath.Join(g.renderedWebsiteDir, rel) + err = os.MkdirAll(filepath.Dir(renderedPath), 0755) + if err != nil { + return err + } + + ext := filepath.Ext(path) + if ext != ".tmpl" { + g.infof("copying non-template file: %q", rel) + return cp(path, renderedPath) + } + + renderedPath = strings.TrimSuffix(renderedPath, ext) + + tmplData, err := ioutil.ReadFile(path) + if err != nil { + return fmt.Errorf("unable to read file %q: %w", rel, err) + } + + out, err := os.Create(renderedPath) + if err != nil { + return err + } + defer out.Close() + + g.infof("rendering %q", rel) + switch relDir { + case "data-sources/": + resSchema, resName := resourceSchema(providerSchema.DataSourceSchemas, 
shortName, relFile) + if resSchema != nil { + tmpl := resourceTemplate(tmplData) + render, err := tmpl.Render(resName, providerName, g.renderedProviderName, "Data Source", "", "", resSchema) + if err != nil { + return fmt.Errorf("unable to render data source template %q: %w", rel, err) + } + _, err = out.WriteString(render) + if err != nil { + return fmt.Errorf("unable to write rendered string: %w", err) + } + return nil + } + g.warnf("data source entitled %q, or %q does not exist", shortName, resName) + case "resources/": + resSchema, resName := resourceSchema(providerSchema.ResourceSchemas, shortName, relFile) + if resSchema != nil { + tmpl := resourceTemplate(tmplData) + render, err := tmpl.Render(resName, providerName, g.renderedProviderName, "Resource", "", "", resSchema) + if err != nil { + return fmt.Errorf("unable to render resource template %q: %w", rel, err) + } + _, err = out.WriteString(render) + if err != nil { + return fmt.Errorf("unable to write regindered string: %w", err) + } + return nil + } + g.warnf("resource entitled %q, or %q does not exist", shortName, resName) + case "": // provider + if relFile == "index.md.tmpl" { + tmpl := providerTemplate(tmplData) + render, err := tmpl.Render(providerName, g.renderedProviderName, "", providerSchema.ConfigSchema) + if err != nil { + return fmt.Errorf("unable to render provider template %q: %w", rel, err) + } + _, err = out.WriteString(render) + if err != nil { + return fmt.Errorf("unable to write rendered string: %w", err) + } + return nil + } + } + + tmpl := docTemplate(tmplData) + err = tmpl.Render(out) + if err != nil { + return fmt.Errorf("unable to render template %q: %w", rel, err) + } + return nil + }) + if err != nil { + return err + } + + return nil +} + +func (g *generator) terraformProviderSchema(ctx context.Context, providerName string) (*tfjson.ProviderSchema, error) { + var err error + + shortName := providerShortName(providerName) + + tmpDir, err := ioutil.TempDir("", "tfws") + if err != 
nil { + return nil, err + } + defer os.RemoveAll(tmpDir) + + // tmpDir := "/tmp/tftmp" + // os.RemoveAll(tmpDir) + // os.MkdirAll(tmpDir, 0755) + // fmt.Printf("[DEBUG] tmpdir %q\n", tmpDir) + + g.infof("compiling provider %q", shortName) + providerPath := fmt.Sprintf("plugins/registry.terraform.io/hashicorp/%s/0.0.1/%s_%s", shortName, runtime.GOOS, runtime.GOARCH) + outFile := filepath.Join(tmpDir, providerPath, fmt.Sprintf("terraform-provider-%s", shortName)) + switch runtime.GOOS { + case "windows": + outFile = outFile + ".exe" + } + buildCmd := exec.Command("go", "build", "-o", outFile) + // TODO: constrain env here to make it a little safer? + _, err = runCmd(buildCmd) + if err != nil { + return nil, err + } + + err = writeFile(filepath.Join(tmpDir, "provider.tf"), fmt.Sprintf(` +provider %[1]q { +} +`, shortName)) + if err != nil { + return nil, err + } + + i := install.NewInstaller() + var sources []src.Source + if g.tfVersion != "" { + g.infof("downloading Terraform CLI binary version from releases.hashicorp.com: %s", g.tfVersion) + sources = []src.Source{ + &releases.ExactVersion{ + Product: product.Terraform, + Version: version.Must(version.NewVersion(g.tfVersion)), + InstallDir: tmpDir, + }, + } + } else { + g.infof("using Terraform CLI binary from PATH if available, otherwise downloading latest Terraform CLI binary") + sources = []src.Source{ + &fs.AnyVersion{ + Product: &product.Terraform, + }, + &checkpoint.LatestVersion{ + InstallDir: tmpDir, + Product: product.Terraform, + }, + } + } + + tfBin, err := i.Ensure(context.Background(), sources) + if err != nil { + return nil, err + } + + tf, err := tfexec.NewTerraform(tmpDir, tfBin) + if err != nil { + return nil, err + } + + g.infof("running terraform init") + err = tf.Init(ctx, tfexec.Get(false), tfexec.PluginDir("./plugins")) + if err != nil { + return nil, err + } + + g.infof("getting provider schema") + schemas, err := tf.ProvidersSchema(ctx) + if err != nil { + return nil, err + } + + if ps, ok := 
schemas.Schemas[shortName]; ok { + return ps, nil + } + + if ps, ok := schemas.Schemas["registry.terraform.io/hashicorp/"+shortName]; ok { + return ps, nil + } + + return nil, fmt.Errorf("unable to find schema in JSON for provider %q", shortName) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-docs/internal/provider/template.go b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/provider/template.go new file mode 100644 index 000000000..f1970f781 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/provider/template.go @@ -0,0 +1,261 @@ +package provider + +import ( + "bytes" + "fmt" + "io" + "strings" + "text/template" + + tfjson "github.com/hashicorp/terraform-json" + + "github.com/hashicorp/terraform-plugin-docs/internal/mdplain" + "github.com/hashicorp/terraform-plugin-docs/internal/tmplfuncs" + "github.com/hashicorp/terraform-plugin-docs/schemamd" +) + +const ( + schemaComment = "" + frontmatterComment = "# generated by https://github.com/hashicorp/terraform-plugin-docs" +) + +type ( + resourceTemplate string + providerTemplate string + + resourceFileTemplate string + providerFileTemplate string + + docTemplate string +) + +func newTemplate(name, text string) (*template.Template, error) { + tmpl := template.New(name) + + tmpl.Funcs(template.FuncMap(map[string]interface{}{ + "codefile": tmplfuncs.CodeFile, + "plainmarkdown": mdplain.PlainMarkdown, + "prefixlines": tmplfuncs.PrefixLines, + "tffile": func(file string) (string, error) { + // TODO: omit comment handling + return tmplfuncs.CodeFile("terraform", file) + }, + "trimspace": strings.TrimSpace, + "split": strings.Split, + })) + + var err error + tmpl, err = tmpl.Parse(text) + if err != nil { + return nil, fmt.Errorf("unable to parse template %q: %w", text, err) + } + + return tmpl, nil +} + +func renderTemplate(name string, text string, out io.Writer, data interface{}) error { + tmpl, err := newTemplate(name, text) + if err != nil { + return err + } + + err 
= tmpl.Execute(out, data) + if err != nil { + return fmt.Errorf("unable to execute template: %w", err) + } + + return nil +} + +func renderStringTemplate(name, text string, data interface{}) (string, error) { + var buf bytes.Buffer + + err := renderTemplate(name, text, &buf, data) + if err != nil { + return "", err + } + + return buf.String(), nil +} + +func (t docTemplate) Render(out io.Writer) error { + s := string(t) + if s == "" { + return nil + } + + return renderTemplate("docTemplate", s, out, nil) +} + +func (t resourceFileTemplate) Render(name, providerName string) (string, error) { + s := string(t) + if s == "" { + return "", nil + } + return renderStringTemplate("resourceFileTemplate", s, struct { + Name string + ShortName string + + ProviderName string + ProviderShortName string + }{ + Name: name, + ShortName: resourceShortName(name, providerName), + + ProviderName: providerName, + ProviderShortName: providerShortName(providerName), + }) +} + +func (t providerFileTemplate) Render(name string) (string, error) { + s := string(t) + if s == "" { + return "", nil + } + return renderStringTemplate("providerFileTemplate", s, struct { + Name string + ShortName string + }{name, providerShortName(name)}) +} + +func (t providerTemplate) Render(providerName, renderedProviderName, exampleFile string, schema *tfjson.Schema) (string, error) { + schemaBuffer := bytes.NewBuffer(nil) + err := schemamd.Render(schema, schemaBuffer) + if err != nil { + return "", fmt.Errorf("unable to render schema: %w", err) + } + + s := string(t) + if s == "" { + return "", nil + } + return renderStringTemplate("providerTemplate", s, struct { + Type string + Name string + Description string + + HasExample bool + ExampleFile string + + HasImport bool + ImportFile string + + ProviderName string + ProviderShortName string + + SchemaMarkdown string + + RenderedProviderName string + }{ + Description: schema.Block.Description, + + HasExample: exampleFile != "", + ExampleFile: exampleFile, + + 
ProviderName: providerName, + ProviderShortName: providerShortName(providerName), + + SchemaMarkdown: schemaComment + "\n" + schemaBuffer.String(), + + RenderedProviderName: renderedProviderName, + }) +} + +func (t resourceTemplate) Render(name, providerName, renderedProviderName, typeName, exampleFile, importFile string, schema *tfjson.Schema) (string, error) { + schemaBuffer := bytes.NewBuffer(nil) + err := schemamd.Render(schema, schemaBuffer) + if err != nil { + return "", fmt.Errorf("unable to render schema: %w", err) + } + + s := string(t) + if s == "" { + return "", nil + } + + return renderStringTemplate("resourceTemplate", s, struct { + Type string + Name string + Description string + + HasExample bool + ExampleFile string + + HasImport bool + ImportFile string + + ProviderName string + ProviderShortName string + + SchemaMarkdown string + + RenderedProviderName string + }{ + Type: typeName, + Name: name, + Description: schema.Block.Description, + + HasExample: exampleFile != "", + ExampleFile: exampleFile, + + HasImport: importFile != "", + ImportFile: importFile, + + ProviderName: providerName, + ProviderShortName: providerShortName(providerName), + + SchemaMarkdown: schemaComment + "\n" + schemaBuffer.String(), + + RenderedProviderName: renderedProviderName, + }) +} + +const defaultResourceTemplate resourceTemplate = `--- +` + frontmatterComment + ` +page_title: "{{.Name}} {{.Type}} - {{.ProviderName}}" +subcategory: "" +description: |- +{{ .Description | plainmarkdown | trimspace | prefixlines " " }} +--- + +# {{.Name}} ({{.Type}}) + +{{ .Description | trimspace }} + +{{ if .HasExample -}} +## Example Usage + +{{ printf "{{tffile %q}}" .ExampleFile }} +{{- end }} + +{{ .SchemaMarkdown | trimspace }} + +{{ if .HasImport -}} +## Import + +Import is supported using the following syntax: + +{{ printf "{{codefile \"shell\" %q}}" .ImportFile }} +{{- end }} +` + +const defaultProviderTemplate providerTemplate = `--- +` + frontmatterComment + ` +page_title: 
"{{.ProviderShortName}} Provider" +subcategory: "" +description: |- +{{ .Description | plainmarkdown | trimspace | prefixlines " " }} +--- + +# {{.ProviderShortName}} Provider + +{{ .Description | trimspace }} + +{{ if .HasExample -}} +## Example Usage + +{{ printf "{{tffile %q}}" .ExampleFile }} +{{- end }} + +{{ .SchemaMarkdown | trimspace }} +` diff --git a/vendor/github.com/hashicorp/terraform-plugin-docs/internal/provider/util.go b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/provider/util.go new file mode 100644 index 000000000..1de2ed460 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/provider/util.go @@ -0,0 +1,136 @@ +package provider + +import ( + "fmt" + "io" + "io/ioutil" + "log" + "os" + "os/exec" + "path/filepath" + "strings" + + tfjson "github.com/hashicorp/terraform-json" +) + +func providerShortName(n string) string { + return strings.TrimPrefix(n, "terraform-provider-") +} + +func resourceShortName(name, providerName string) string { + psn := providerShortName(providerName) + return strings.TrimPrefix(name, psn+"_") +} + +func copyFile(srcPath, dstPath string, mode os.FileMode) error { + srcFile, err := os.Open(srcPath) + if err != nil { + return err + } + defer srcFile.Close() + + // If the destination file already exists, we shouldn't blow it away + dstFile, err := os.OpenFile(dstPath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, mode) + if err != nil { + return err + } + defer dstFile.Close() + + _, err = io.Copy(dstFile, srcFile) + if err != nil { + return err + } + + return nil +} + +func removeAllExt(file string) string { + for { + ext := filepath.Ext(file) + if ext == "" || ext == file { + return file + } + file = strings.TrimSuffix(file, ext) + } +} + +// resourceSchema determines whether there is a schema in the supplied schemas map which +// has either the providerShortName or the providerShortName concatenated with the +// templateFileName (stripped of file extension. 
+func resourceSchema(schemas map[string]*tfjson.Schema, providerShortName, templateFileName string) (*tfjson.Schema, string) { + if schema, ok := schemas[providerShortName]; ok { + return schema, providerShortName + } + + resName := providerShortName + "_" + removeAllExt(templateFileName) + + if schema, ok := schemas[resName]; ok { + return schema, resName + } + + return nil, resName +} + +func writeFile(path string, data string) error { + dir, _ := filepath.Split(path) + err := os.MkdirAll(dir, 0755) + if err != nil { + return fmt.Errorf("unable to make dir %q: %w", dir, err) + } + + err = ioutil.WriteFile(path, []byte(data), 0644) + if err != nil { + return fmt.Errorf("unable to write file %q: %w", path, err) + } + + return nil +} + +func runCmd(cmd *exec.Cmd) ([]byte, error) { + output, err := cmd.CombinedOutput() + if err != nil { + log.Printf("error executing %q, %v", cmd.Path, cmd.Args) + log.Printf(string(output)) + return nil, fmt.Errorf("error executing %q: %w", cmd.Path, err) + } + return output, nil +} + +func cp(srcDir, dstDir string) error { + err := filepath.Walk(srcDir, func(srcPath string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + relPath, err := filepath.Rel(srcDir, srcPath) + if err != nil { + return err + } + + dstPath := filepath.Join(dstDir, relPath) + + switch mode := f.Mode(); { + case mode.IsDir(): + if err := os.Mkdir(dstPath, f.Mode()); err != nil && !os.IsExist(err) { + return err + } + case mode.IsRegular(): + if err := copyFile(srcPath, dstPath, mode); err != nil { + return err + } + default: + return fmt.Errorf("unknown file type (%d / %s) for %s", f.Mode(), f.Mode().String(), srcPath) + } + + return nil + }) + return err +} + +func fileExists(filename string) bool { + info, err := os.Stat(filename) + if os.IsNotExist(err) { + return false + } + return !info.IsDir() +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-docs/internal/provider/validate.go 
b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/provider/validate.go new file mode 100644 index 000000000..5bc536569 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/provider/validate.go @@ -0,0 +1,266 @@ +package provider + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/mitchellh/cli" +) + +func Validate(ui cli.Ui) error { + dirExists := func(name string) bool { + if _, err := os.Stat(name); err != nil { + return false + } + + return true + } + + switch { + default: + ui.Warn("no website detected, exiting") + case dirExists("templates"): + ui.Info("detected templates directory, running checks...") + err := validateTemplates(ui, "templates") + if err != nil { + return err + } + if dirExists("examples") { + ui.Info("detected examples directory for templates, running checks...") + err = validateExamples(ui, "examples") + if err != nil { + return err + } + } + return err + case dirExists("docs"): + ui.Info("detected static docs directory, running checks") + return validateStaticDocs(ui, "docs") + case dirExists("website"): + ui.Info("detected legacy website directory, running checks") + return validateLegacyWebsite(ui, "website") + } + + return nil +} + +func validateExamples(ui cli.Ui, dir string) error { + return nil +} + +func validateTemplates(ui cli.Ui, dir string) error { + checks := []check{ + checkAllowedFiles( + "index.md", + "index.md.tmpl", + ), + checkAllowedDirs( + "data-sources", + "guides", + "resources", + ), + checkBlockedExtensions( + ".html.md.tmpl", + ), + checkAllowedExtensions( + ".md", + ".md.tmpl", + ), + } + issues := []issue{} + for _, c := range checks { + checkIssues, err := c(dir) + if err != nil { + return err + } + issues = append(issues, checkIssues...) 
+ } + for _, issue := range issues { + ui.Warn(fmt.Sprintf("%s: %s", issue.file, issue.message)) + } + if len(issues) > 0 { + return fmt.Errorf("invalid templates directory") + } + return nil +} + +func validateStaticDocs(ui cli.Ui, dir string) error { + checks := []check{ + checkAllowedFiles( + "index.md", + ), + checkAllowedDirs( + "data-sources", + "guides", + "resources", + ), + checkBlockedExtensions( + ".html.md.tmpl", + ".html.md", + ".md.tmpl", + ), + checkAllowedExtensions( + ".md", + ), + } + issues := []issue{} + for _, c := range checks { + checkIssues, err := c(dir) + if err != nil { + return err + } + issues = append(issues, checkIssues...) + } + for _, issue := range issues { + ui.Warn(fmt.Sprintf("%s: %s", issue.file, issue.message)) + } + if len(issues) > 0 { + return fmt.Errorf("invalid templates directory") + } + return nil +} + +func validateLegacyWebsite(ui cli.Ui, dir string) error { + panic("not implemented") +} + +type issue struct { + file string + message string +} + +type check func(dir string) ([]issue, error) + +func checkBlockedExtensions(exts ...string) check { + return func(dir string) ([]issue, error) { + issues := []issue{} + err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if info.IsDir() { + return nil + } + for _, ext := range exts { + if strings.HasSuffix(path, ext) { + _, file := filepath.Split(path) + issues = append(issues, issue{ + file: path, + message: fmt.Sprintf("the extension for %q is not supported", file), + }) + break + } + } + return nil + }) + if err != nil { + return nil, err + } + return issues, nil + } +} + +func checkAllowedExtensions(exts ...string) check { + return func(dir string) ([]issue, error) { + issues := []issue{} + err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if info.IsDir() { + return nil + } + valid := false + for _, ext := range exts { + if 
strings.HasSuffix(path, ext) { + valid = true + break + } + } + if !valid { + _, file := filepath.Split(path) + issues = append(issues, issue{ + file: path, + message: fmt.Sprintf("the extension for %q is not expected", file), + }) + } + return nil + }) + if err != nil { + return nil, err + } + return issues, nil + } +} + +func checkAllowedDirs(dirs ...string) check { + allowedDirs := map[string]bool{} + for _, d := range dirs { + allowedDirs[d] = true + } + + return func(dir string) ([]issue, error) { + issues := []issue{} + + f, err := os.Open(dir) + if err != nil { + return nil, err + } + infos, err := f.Readdir(-1) + if err != nil { + return nil, err + } + + for _, fi := range infos { + if !fi.IsDir() { + continue + } + + if !allowedDirs[fi.Name()] { + issues = append(issues, issue{ + file: filepath.Join(dir, fi.Name()), + message: fmt.Sprintf("directory %q is not allowed", fi.Name()), + }) + } + } + + return issues, nil + } +} + +func checkAllowedFiles(dirs ...string) check { + allowedFiles := map[string]bool{} + for _, d := range dirs { + allowedFiles[d] = true + } + + return func(dir string) ([]issue, error) { + issues := []issue{} + + f, err := os.Open(dir) + if err != nil { + return nil, err + } + infos, err := f.Readdir(-1) + if err != nil { + return nil, err + } + + for _, fi := range infos { + if fi.IsDir() { + continue + } + + if !allowedFiles[fi.Name()] { + issues = append(issues, issue{ + file: filepath.Join(dir, fi.Name()), + message: fmt.Sprintf("file %q is not allowed", fi.Name()), + }) + } + } + + return issues, nil + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-docs/internal/tmplfuncs/tmplfuncs.go b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/tmplfuncs/tmplfuncs.go new file mode 100644 index 000000000..67ffa8f38 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-docs/internal/tmplfuncs/tmplfuncs.go @@ -0,0 +1,51 @@ +package tmplfuncs + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + 
"strings" +) + +func PrefixLines(prefix, text string) string { + return prefix + strings.Join(strings.Split(text, "\n"), "\n"+prefix) +} + +func CodeFile(format, file string) (string, error) { + // paths are relative to the rendering process work dir, which + // may be undesirable, probably need to think about it + wd, err := os.Getwd() + if err != nil { + return "", err + } + + fullPath := filepath.Join(wd, file) + content, err := ioutil.ReadFile(fullPath) + if err != nil { + return "", fmt.Errorf("unable to read content from %q: %w", file, err) + } + + sContent := strings.TrimSpace(string(content)) + if sContent == "" { + return "", fmt.Errorf("no file content in %q", file) + } + + md := &strings.Builder{} + _, err = md.WriteString("```" + format + "\n") + if err != nil { + return "", err + } + + _, err = md.WriteString(sContent) + if err != nil { + return "", err + } + + _, err = md.WriteString("\n```") + if err != nil { + return "", err + } + + return md.String(), nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-docs/schemamd/behaviors.go b/vendor/github.com/hashicorp/terraform-plugin-docs/schemamd/behaviors.go new file mode 100644 index 000000000..954225ea4 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-docs/schemamd/behaviors.go @@ -0,0 +1,76 @@ +package schemamd + +import ( + tfjson "github.com/hashicorp/terraform-json" +) + +func childAttributeIsRequired(att *tfjson.SchemaAttribute) bool { + return att.Required +} + +func childBlockIsRequired(block *tfjson.SchemaBlockType) bool { + return block.MinItems > 0 +} + +func childAttributeIsOptional(att *tfjson.SchemaAttribute) bool { + return att.Optional +} + +// childBlockIsOptional returns true for blocks with with min items 0 +// which are either empty or have any required or optional children. 
+func childBlockIsOptional(block *tfjson.SchemaBlockType) bool { + if block.MinItems > 0 { + return false + } + + if len(block.Block.NestedBlocks) == 0 && len(block.Block.Attributes) == 0 { + return true + } + + for _, childBlock := range block.Block.NestedBlocks { + if childBlockIsRequired(childBlock) { + return true + } + if childBlockIsOptional(childBlock) { + return true + } + } + + for _, childAtt := range block.Block.Attributes { + if childAttributeIsRequired(childAtt) { + return true + } + if childAttributeIsOptional(childAtt) { + return true + } + } + + return false +} + +// Read-only is computed but not optional. +func childAttributeIsReadOnly(att *tfjson.SchemaAttribute) bool { + // these shouldn't be able to be required, but just in case + return att.Computed && !att.Optional && !att.Required +} + +// childBlockIsReadOnly returns true for blocks where all leaves are read-only. +func childBlockIsReadOnly(block *tfjson.SchemaBlockType) bool { + if block.MinItems != 0 || block.MaxItems != 0 { + return false + } + + for _, childBlock := range block.Block.NestedBlocks { + if !childBlockIsReadOnly(childBlock) { + return false + } + } + + for _, childAtt := range block.Block.Attributes { + if !childAttributeIsReadOnly(childAtt) { + return false + } + } + + return true +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-docs/schemamd/render.go b/vendor/github.com/hashicorp/terraform-plugin-docs/schemamd/render.go new file mode 100644 index 000000000..9415aa080 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-docs/schemamd/render.go @@ -0,0 +1,511 @@ +package schemamd + +import ( + "fmt" + "io" + "sort" + "strings" + + tfjson "github.com/hashicorp/terraform-json" + "github.com/zclconf/go-cty/cty" +) + +// Render writes a Markdown formatted Schema definition to the specified writer. 
+// A Schema contains a Version and the root Block, for example: +// "aws_accessanalyzer_analyzer": { +// "block": { +// }, +// "version": 0 +// }, +func Render(schema *tfjson.Schema, w io.Writer) error { + _, err := io.WriteString(w, "## Schema\n\n") + if err != nil { + return err + } + + err = writeRootBlock(w, schema.Block) + if err != nil { + return fmt.Errorf("unable to render schema: %w", err) + } + + return nil +} + +// Group by Attribute/Block characteristics. +type groupFilter struct { + topLevelTitle string + nestedTitle string + + filterAttribute func(att *tfjson.SchemaAttribute) bool + filterBlock func(block *tfjson.SchemaBlockType) bool +} + +var ( + // Attributes and Blocks are in one of 3 characteristic groups: + // * Required + // * Optional + // * Read-Only + groupFilters = []groupFilter{ + {"### Required", "Required:", childAttributeIsRequired, childBlockIsRequired}, + {"### Optional", "Optional:", childAttributeIsOptional, childBlockIsOptional}, + {"### Read-Only", "Read-Only:", childAttributeIsReadOnly, childBlockIsReadOnly}, + } +) + +type nestedType struct { + anchorID string + path []string + block *tfjson.SchemaBlock + object *cty.Type + attrs *tfjson.SchemaNestedAttributeType + + group groupFilter +} + +func writeAttribute(w io.Writer, path []string, att *tfjson.SchemaAttribute, group groupFilter) ([]nestedType, error) { + name := path[len(path)-1] + + _, err := io.WriteString(w, "- `"+name+"` ") + if err != nil { + return nil, err + } + + if att.AttributeNestedType == nil { + err = WriteAttributeDescription(w, att, false) + } else { + err = WriteNestedAttributeTypeDescription(w, att, false) + } + if err != nil { + return nil, err + } + if att.AttributeType.IsTupleType() { + return nil, fmt.Errorf("TODO: tuples are not yet supported") + } + + anchorID := "nestedatt--" + strings.Join(path, "--") + nestedTypes := []nestedType{} + switch { + case att.AttributeNestedType != nil: + _, err = io.WriteString(w, " (see [below for nested 
schema](#"+anchorID+"))") + if err != nil { + return nil, err + } + + nestedTypes = append(nestedTypes, nestedType{ + anchorID: anchorID, + path: path, + attrs: att.AttributeNestedType, + + group: group, + }) + case att.AttributeType.IsObjectType(): + _, err = io.WriteString(w, " (see [below for nested schema](#"+anchorID+"))") + if err != nil { + return nil, err + } + + nestedTypes = append(nestedTypes, nestedType{ + anchorID: anchorID, + path: path, + object: &att.AttributeType, + + group: group, + }) + case att.AttributeType.IsCollectionType() && att.AttributeType.ElementType().IsObjectType(): + _, err = io.WriteString(w, " (see [below for nested schema](#"+anchorID+"))") + if err != nil { + return nil, err + } + + nt := att.AttributeType.ElementType() + nestedTypes = append(nestedTypes, nestedType{ + anchorID: anchorID, + path: path, + object: &nt, + + group: group, + }) + } + + _, err = io.WriteString(w, "\n") + if err != nil { + return nil, err + } + + return nestedTypes, nil +} + +func writeBlockType(w io.Writer, path []string, block *tfjson.SchemaBlockType) ([]nestedType, error) { + name := path[len(path)-1] + + _, err := io.WriteString(w, "- `"+name+"` ") + if err != nil { + return nil, err + } + + err = WriteBlockTypeDescription(w, block) + if err != nil { + return nil, fmt.Errorf("unable to write block description for %q: %w", name, err) + } + + anchorID := "nestedblock--" + strings.Join(path, "--") + nt := nestedType{ + anchorID: anchorID, + path: path, + block: block.Block, + } + + _, err = io.WriteString(w, " (see [below for nested schema](#"+anchorID+"))") + if err != nil { + return nil, err + } + + _, err = io.WriteString(w, "\n") + if err != nil { + return nil, err + } + + return []nestedType{nt}, nil +} + +func writeRootBlock(w io.Writer, block *tfjson.SchemaBlock) error { + return writeBlockChildren(w, nil, block, true) +} + +// A Block contains: +// * Attributes (arbitrarily nested) +// * Nested Blocks (with nesting mode, max and min items) +// 
* Description(Kind) +// * Deprecated flag +// For example: +// "block": { +// "attributes": { +// "certificate_arn": { +// "description_kind": "plain", +// "required": true, +// "type": "string" +// } +// }, +// "block_types": { +// "timeouts": { +// "block": { +// "attributes": { +// }, +// "description_kind": "plain" +// }, +// "nesting_mode": "single" +// } +// }, +// "description_kind": "plain" +// }, +func writeBlockChildren(w io.Writer, parents []string, block *tfjson.SchemaBlock, root bool) error { + names := []string{} + for n := range block.Attributes { + names = append(names, n) + } + for n := range block.NestedBlocks { + names = append(names, n) + } + + groups := map[int][]string{} + + // Group Attributes/Blocks by characteristics. +nameLoop: + for _, n := range names { + if childBlock, ok := block.NestedBlocks[n]; ok { + for i, gf := range groupFilters { + if gf.filterBlock(childBlock) { + groups[i] = append(groups[i], n) + continue nameLoop + } + } + } else if childAtt, ok := block.Attributes[n]; ok { + for i, gf := range groupFilters { + // By default, the attribute `id` is place in the "Read-Only" group + // if the provider schema contained no `.Description` for it. + // + // If a `.Description` is provided instead, the behaviour will be the + // same as for every other attribute. + if strings.ToLower(n) == "id" && childAtt.Description == "" { + if strings.Contains(gf.topLevelTitle, "Read-Only") { + childAtt.Description = "The ID of this resource." 
+ groups[i] = append(groups[i], n) + continue nameLoop + } + } else if gf.filterAttribute(childAtt) { + groups[i] = append(groups[i], n) + continue nameLoop + } + } + } + + return fmt.Errorf("no match for %q, this can happen if you have incompatible schema defined, for example an "+ + "optional block where all the child attributes are computed, in which case the block itself should also "+ + "be marked computed", n) + } + + nestedTypes := []nestedType{} + + // For each characteristic group + // If Attribute + // Write out summary including characteristic and type (if primitive type or collection of primitives) + // If NestedAttribute type, Object type or collection of Objects, add to list of nested types + // ElseIf Block + // Write out summary including characteristic + // Add block to list of nested types + // End + // End + // For each nested type: + // Write out heading + // If Block + // Recursively call this function (writeBlockChildren) + // ElseIf Object + // Call writeObjectChildren, which + // For each Object Attribute + // Write out summary including characteristic and type (if primitive type or collection of primitives) + // If Object type or collection of Objects, add to list of nested types + // End + // Recursively do nested type functionality + // ElseIf NestedAttribute + // Call writeNestedAttributeChildren, which + // For each nested Attribute + // Write out summary including characteristic and type (if primitive type or collection of primitives) + // If NestedAttribute type, Object type or collection of Objects, add to list of nested types + // End + // Recursively do nested type functionality + // End + // End + for i, gf := range groupFilters { + sortedNames := groups[i] + if len(sortedNames) == 0 { + continue + } + sort.Strings(sortedNames) + + groupTitle := gf.topLevelTitle + if !root { + groupTitle = gf.nestedTitle + } + + _, err := io.WriteString(w, groupTitle+"\n\n") + if err != nil { + return err + } + + for _, name := range sortedNames { 
+ path := append(parents, name) + + if childBlock, ok := block.NestedBlocks[name]; ok { + nt, err := writeBlockType(w, path, childBlock) + if err != nil { + return fmt.Errorf("unable to render block %q: %w", name, err) + } + + nestedTypes = append(nestedTypes, nt...) + continue + } else if childAtt, ok := block.Attributes[name]; ok { + nt, err := writeAttribute(w, path, childAtt, gf) + if err != nil { + return fmt.Errorf("unable to render attribute %q: %w", name, err) + } + + nestedTypes = append(nestedTypes, nt...) + continue + } + + return fmt.Errorf("unexpected name in schema render %q", name) + } + + _, err = io.WriteString(w, "\n") + if err != nil { + return err + } + } + + err := writeNestedTypes(w, nestedTypes) + if err != nil { + return err + } + + return nil +} + +func writeNestedTypes(w io.Writer, nestedTypes []nestedType) error { + for _, nt := range nestedTypes { + _, err := io.WriteString(w, "\n") + if err != nil { + return err + } + + _, err = io.WriteString(w, "### Nested Schema for `"+strings.Join(nt.path, ".")+"`\n\n") + if err != nil { + return err + } + + switch { + case nt.block != nil: + err = writeBlockChildren(w, nt.path, nt.block, false) + if err != nil { + return err + } + case nt.object != nil: + err = writeObjectChildren(w, nt.path, *nt.object, nt.group) + if err != nil { + return err + } + case nt.attrs != nil: + err = writeNestedAttributeChildren(w, nt.path, nt.attrs, nt.group) + if err != nil { + return err + } + default: + return fmt.Errorf("missing information on nested block: %s", strings.Join(nt.path, ".")) + } + + _, err = io.WriteString(w, "\n") + if err != nil { + return err + } + } + + return nil +} + +func writeObjectAttribute(w io.Writer, path []string, att cty.Type, group groupFilter) ([]nestedType, error) { + name := path[len(path)-1] + + _, err := io.WriteString(w, "- `"+name+"` (") + if err != nil { + return nil, err + } + + err = WriteType(w, att) + if err != nil { + return nil, err + } + + _, err = io.WriteString(w, 
")") + if err != nil { + return nil, err + } + + if att.IsTupleType() { + return nil, fmt.Errorf("TODO: tuples are not yet supported") + } + + anchorID := "nestedobjatt--" + strings.Join(path, "--") + nestedTypes := []nestedType{} + switch { + case att.IsObjectType(): + _, err = io.WriteString(w, " (see [below for nested schema](#"+anchorID+"))") + if err != nil { + return nil, err + } + + nestedTypes = append(nestedTypes, nestedType{ + anchorID: anchorID, + path: path, + object: &att, + + group: group, + }) + case att.IsCollectionType() && att.ElementType().IsObjectType(): + _, err = io.WriteString(w, " (see [below for nested schema](#"+anchorID+"))") + if err != nil { + return nil, err + } + + nt := att.ElementType() + nestedTypes = append(nestedTypes, nestedType{ + anchorID: anchorID, + path: path, + object: &nt, + + group: group, + }) + } + + _, err = io.WriteString(w, "\n") + if err != nil { + return nil, err + } + + return nestedTypes, nil +} + +func writeObjectChildren(w io.Writer, parents []string, ty cty.Type, group groupFilter) error { + _, err := io.WriteString(w, group.nestedTitle+"\n\n") + if err != nil { + return err + } + + atts := ty.AttributeTypes() + sortedNames := []string{} + for n := range atts { + sortedNames = append(sortedNames, n) + } + sort.Strings(sortedNames) + nestedTypes := []nestedType{} + + for _, name := range sortedNames { + att := atts[name] + path := append(parents, name) + + nt, err := writeObjectAttribute(w, path, att, group) + if err != nil { + return fmt.Errorf("unable to render attribute %q: %w", name, err) + } + + nestedTypes = append(nestedTypes, nt...) 
+ } + + _, err = io.WriteString(w, "\n") + if err != nil { + return err + } + + err = writeNestedTypes(w, nestedTypes) + if err != nil { + return err + } + + return nil +} + +func writeNestedAttributeChildren(w io.Writer, parents []string, nestedAttributes *tfjson.SchemaNestedAttributeType, group groupFilter) error { + _, err := io.WriteString(w, group.nestedTitle+"\n\n") + if err != nil { + return err + } + + sortedNames := []string{} + for n := range nestedAttributes.Attributes { + sortedNames = append(sortedNames, n) + } + sort.Strings(sortedNames) + nestedTypes := []nestedType{} + + for _, name := range sortedNames { + att := nestedAttributes.Attributes[name] + path := append(parents, name) + + nt, err := writeAttribute(w, path, att, group) + if err != nil { + return fmt.Errorf("unable to render attribute %q: %w", name, err) + } + + nestedTypes = append(nestedTypes, nt...) + } + + _, err = io.WriteString(w, "\n") + if err != nil { + return err + } + + err = writeNestedTypes(w, nestedTypes) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-docs/schemamd/write_attribute_description.go b/vendor/github.com/hashicorp/terraform-plugin-docs/schemamd/write_attribute_description.go new file mode 100644 index 000000000..13517b532 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-docs/schemamd/write_attribute_description.go @@ -0,0 +1,72 @@ +package schemamd + +import ( + "fmt" + "io" + "strings" + + tfjson "github.com/hashicorp/terraform-json" +) + +func WriteAttributeDescription(w io.Writer, att *tfjson.SchemaAttribute, includeRW bool) error { + _, err := io.WriteString(w, "(") + if err != nil { + return err + } + + err = WriteType(w, att.AttributeType) + if err != nil { + return err + } + + if includeRW { + switch { + case childAttributeIsRequired(att): + _, err = io.WriteString(w, ", Required") + if err != nil { + return err + } + case childAttributeIsOptional(att): + _, err = 
io.WriteString(w, ", Optional") + if err != nil { + return err + } + case childAttributeIsReadOnly(att): + _, err = io.WriteString(w, ", Read-only") + if err != nil { + return err + } + default: + return fmt.Errorf("attribute does not match any filter states") + } + } + + if att.Sensitive { + _, err := io.WriteString(w, ", Sensitive") + if err != nil { + return err + } + } + + if att.Deprecated { + _, err := io.WriteString(w, ", Deprecated") + if err != nil { + return err + } + } + + _, err = io.WriteString(w, ")") + if err != nil { + return err + } + + desc := strings.TrimSpace(att.Description) + if desc != "" { + _, err = io.WriteString(w, " "+desc) + if err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-docs/schemamd/write_block_type_description.go b/vendor/github.com/hashicorp/terraform-plugin-docs/schemamd/write_block_type_description.go new file mode 100644 index 000000000..14a18153c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-docs/schemamd/write_block_type_description.go @@ -0,0 +1,96 @@ +package schemamd + +import ( + "fmt" + "io" + "strings" + + tfjson "github.com/hashicorp/terraform-json" +) + +func WriteBlockTypeDescription(w io.Writer, block *tfjson.SchemaBlockType) error { + _, err := io.WriteString(w, "(Block") + if err != nil { + return err + } + + switch block.NestingMode { + default: + return fmt.Errorf("unexpected nesting mode for block: %s", block.NestingMode) + case tfjson.SchemaNestingModeSingle: + // nothing + case tfjson.SchemaNestingModeList: + _, err = io.WriteString(w, " List") + if err != nil { + return err + } + case tfjson.SchemaNestingModeSet: + _, err = io.WriteString(w, " Set") + if err != nil { + return err + } + case tfjson.SchemaNestingModeMap: + _, err = io.WriteString(w, " Map") + if err != nil { + return err + } + } + + if block.NestingMode == tfjson.SchemaNestingModeSingle { + switch { + case childBlockIsRequired(block): + _, err = 
io.WriteString(w, ", Required") + if err != nil { + return err + } + case childBlockIsOptional(block): + _, err = io.WriteString(w, ", Optional") + if err != nil { + return err + } + case childBlockIsReadOnly(block): + _, err = io.WriteString(w, ", Read-only") + if err != nil { + return err + } + default: + return fmt.Errorf("block does not match any filter states") + } + } else { + if block.MinItems > 0 { + _, err = io.WriteString(w, fmt.Sprintf(", Min: %d", block.MinItems)) + if err != nil { + return err + } + } + } + + if block.MaxItems > 0 { + _, err = io.WriteString(w, fmt.Sprintf(", Max: %d", block.MaxItems)) + if err != nil { + return err + } + } + + if block.Block.Deprecated { + _, err = io.WriteString(w, ", Deprecated") + if err != nil { + return err + } + } + + _, err = io.WriteString(w, ")") + if err != nil { + return err + } + + desc := strings.TrimSpace(block.Block.Description) + if desc != "" { + _, err = io.WriteString(w, " "+desc) + if err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-docs/schemamd/write_nested_attribute_type_description.go b/vendor/github.com/hashicorp/terraform-plugin-docs/schemamd/write_nested_attribute_type_description.go new file mode 100644 index 000000000..f9e90a579 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-docs/schemamd/write_nested_attribute_type_description.go @@ -0,0 +1,111 @@ +package schemamd + +import ( + "fmt" + "io" + "strings" + + tfjson "github.com/hashicorp/terraform-json" +) + +func WriteNestedAttributeTypeDescription(w io.Writer, att *tfjson.SchemaAttribute, includeRW bool) error { + nestedAttributeType := att.AttributeNestedType + if nestedAttributeType == nil { + return fmt.Errorf("AttributeNestedType is nil") + } + + _, err := io.WriteString(w, "(Attributes") + if err != nil { + return err + } + + nestingMode := nestedAttributeType.NestingMode + switch nestingMode { + default: + return fmt.Errorf("unexpected nesting mode for 
attributes: %s", nestingMode) + case tfjson.SchemaNestingModeSingle: + // nothing + case tfjson.SchemaNestingModeList: + _, err = io.WriteString(w, " List") + if err != nil { + return err + } + case tfjson.SchemaNestingModeSet: + _, err = io.WriteString(w, " Set") + if err != nil { + return err + } + case tfjson.SchemaNestingModeMap: + _, err = io.WriteString(w, " Map") + if err != nil { + return err + } + } + + if nestingMode == tfjson.SchemaNestingModeSingle { + if includeRW { + switch { + case childAttributeIsRequired(att): + _, err = io.WriteString(w, ", Required") + if err != nil { + return err + } + case childAttributeIsOptional(att): + _, err = io.WriteString(w, ", Optional") + if err != nil { + return err + } + case childAttributeIsReadOnly(att): + _, err = io.WriteString(w, ", Read-only") + if err != nil { + return err + } + default: + return fmt.Errorf("attribute does not match any filter states") + } + } + } else { + if nestedAttributeType.MinItems > 0 { + _, err = io.WriteString(w, fmt.Sprintf(", Min: %d", nestedAttributeType.MinItems)) + if err != nil { + return err + } + } + } + + if nestedAttributeType.MaxItems > 0 { + _, err = io.WriteString(w, fmt.Sprintf(", Max: %d", nestedAttributeType.MaxItems)) + if err != nil { + return err + } + } + + if att.Sensitive { + _, err := io.WriteString(w, ", Sensitive") + if err != nil { + return err + } + } + + if att.Deprecated { + _, err = io.WriteString(w, ", Deprecated") + if err != nil { + return err + } + } + + _, err = io.WriteString(w, ")") + if err != nil { + return err + } + + desc := strings.TrimSpace(att.Description) + if desc != "" { + _, err = io.WriteString(w, " "+desc) + if err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-docs/schemamd/write_type.go b/vendor/github.com/hashicorp/terraform-plugin-docs/schemamd/write_type.go new file mode 100644 index 000000000..0250edd8b --- /dev/null +++ 
b/vendor/github.com/hashicorp/terraform-plugin-docs/schemamd/write_type.go @@ -0,0 +1,62 @@ +package schemamd + +import ( + "fmt" + "io" + + "github.com/zclconf/go-cty/cty" +) + +func WriteType(w io.Writer, ty cty.Type) error { + switch { + case ty == cty.DynamicPseudoType: + _, err := io.WriteString(w, "Dynamic") + return err + case ty.IsPrimitiveType(): + switch ty { + case cty.String: + _, err := io.WriteString(w, "String") + return err + case cty.Bool: + _, err := io.WriteString(w, "Boolean") + return err + case cty.Number: + _, err := io.WriteString(w, "Number") + return err + } + return fmt.Errorf("unexpected primitive type %q", ty.FriendlyName()) + case ty.IsCollectionType(): + switch { + default: + return fmt.Errorf("unexpected collection type %q", ty.FriendlyName()) + case ty.IsListType(): + _, err := io.WriteString(w, "List of ") + if err != nil { + return err + } + case ty.IsSetType(): + _, err := io.WriteString(w, "Set of ") + if err != nil { + return err + } + case ty.IsMapType(): + _, err := io.WriteString(w, "Map of ") + if err != nil { + return err + } + } + err := WriteType(w, ty.ElementType()) + if err != nil { + return fmt.Errorf("unable to write element type for %q: %w", ty.FriendlyName(), err) + } + return nil + case ty.IsTupleType(): + // TODO: write additional type info? 
+ _, err := io.WriteString(w, "Tuple") + return err + case ty.IsObjectType(): + _, err := io.WriteString(w, "Object") + return err + } + return fmt.Errorf("unexpected type %q", ty.FriendlyName()) +} diff --git a/vendor/github.com/huandu/xstrings/.gitignore b/vendor/github.com/huandu/xstrings/.gitignore new file mode 100644 index 000000000..daf913b1b --- /dev/null +++ b/vendor/github.com/huandu/xstrings/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/huandu/xstrings/.travis.yml b/vendor/github.com/huandu/xstrings/.travis.yml new file mode 100644 index 000000000..d6460be41 --- /dev/null +++ b/vendor/github.com/huandu/xstrings/.travis.yml @@ -0,0 +1,7 @@ +language: go +install: + - go get golang.org/x/tools/cmd/cover + - go get github.com/mattn/goveralls +script: + - go test -v -covermode=count -coverprofile=coverage.out + - 'if [ "$TRAVIS_PULL_REQUEST" = "false" ] && [ ! -z "$COVERALLS_TOKEN" ]; then $HOME/gopath/bin/goveralls -coverprofile=coverage.out -service=travis-ci -repotoken $COVERALLS_TOKEN; fi' diff --git a/vendor/github.com/huandu/xstrings/CONTRIBUTING.md b/vendor/github.com/huandu/xstrings/CONTRIBUTING.md new file mode 100644 index 000000000..d7b4b8d58 --- /dev/null +++ b/vendor/github.com/huandu/xstrings/CONTRIBUTING.md @@ -0,0 +1,23 @@ +# Contributing # + +Thanks for your contribution in advance. No matter what you will contribute to this project, pull request or bug report or feature discussion, it's always highly appreciated. + +## New API or feature ## + +I want to speak more about how to add new functions to this package. + +Package `xstring` is a collection of useful string functions which should be implemented in Go. 
It's a bit subject to say which function should be included and which should not. I set up following rules in order to make it clear and as objective as possible. + +* Rule 1: Only string algorithm, which takes string as input, can be included. +* Rule 2: If a function has been implemented in package `string`, it must not be included. +* Rule 3: If a function is not language neutral, it must not be included. +* Rule 4: If a function is a part of standard library in other languages, it can be included. +* Rule 5: If a function is quite useful in some famous framework or library, it can be included. + +New function must be discussed in project issues before submitting any code. If a pull request with new functions is sent without any ref issue, it will be rejected. + +## Pull request ## + +Pull request is always welcome. Just make sure you have run `go fmt` and all test cases passed before submit. + +If the pull request is to add a new API or feature, don't forget to update README.md and add new API in function list. diff --git a/vendor/github.com/huandu/xstrings/LICENSE b/vendor/github.com/huandu/xstrings/LICENSE new file mode 100644 index 000000000..270177259 --- /dev/null +++ b/vendor/github.com/huandu/xstrings/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Huan Du + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/huandu/xstrings/README.md b/vendor/github.com/huandu/xstrings/README.md new file mode 100644 index 000000000..292bf2f39 --- /dev/null +++ b/vendor/github.com/huandu/xstrings/README.md @@ -0,0 +1,117 @@ +# xstrings # + +[![Build Status](https://travis-ci.org/huandu/xstrings.svg?branch=master)](https://travis-ci.org/huandu/xstrings) +[![GoDoc](https://godoc.org/github.com/huandu/xstrings?status.svg)](https://godoc.org/github.com/huandu/xstrings) +[![Go Report](https://goreportcard.com/badge/github.com/huandu/xstrings)](https://goreportcard.com/report/github.com/huandu/xstrings) +[![Coverage Status](https://coveralls.io/repos/github/huandu/xstrings/badge.svg?branch=master)](https://coveralls.io/github/huandu/xstrings?branch=master) + +Go package [xstrings](https://godoc.org/github.com/huandu/xstrings) is a collection of string functions, which are widely used in other languages but absent in Go package [strings](http://golang.org/pkg/strings). + +All functions are well tested and carefully tuned for performance. + +## Propose a new function ## + +Please review [contributing guideline](CONTRIBUTING.md) and [create new issue](https://github.com/huandu/xstrings/issues) to state why it should be included. + +## Install ## + +Use `go get` to install this library. + + go get github.com/huandu/xstrings + +## API document ## + +See [GoDoc](https://godoc.org/github.com/huandu/xstrings) for full document. + +## Function list ## + +Go functions have a unique naming style. 
One, who has experience in other language but new in Go, may have difficulties to find out right string function to use. + +Here is a list of functions in [strings](http://golang.org/pkg/strings) and [xstrings](https://godoc.org/github.com/huandu/xstrings) with enough extra information about how to map these functions to their friends in other languages. Hope this list could be helpful for fresh gophers. + +### Package `xstrings` functions ### + +*Keep this table sorted by Function in ascending order.* + +| Function | Friends | # | +| -------- | ------- | --- | +| [Center](https://godoc.org/github.com/huandu/xstrings#Center) | `str.center` in Python; `String#center` in Ruby | [#30](https://github.com/huandu/xstrings/issues/30) | +| [Count](https://godoc.org/github.com/huandu/xstrings#Count) | `String#count` in Ruby | [#16](https://github.com/huandu/xstrings/issues/16) | +| [Delete](https://godoc.org/github.com/huandu/xstrings#Delete) | `String#delete` in Ruby | [#17](https://github.com/huandu/xstrings/issues/17) | +| [ExpandTabs](https://godoc.org/github.com/huandu/xstrings#ExpandTabs) | `str.expandtabs` in Python | [#27](https://github.com/huandu/xstrings/issues/27) | +| [FirstRuneToLower](https://godoc.org/github.com/huandu/xstrings#FirstRuneToLower) | `lcfirst` in PHP or Perl | [#15](https://github.com/huandu/xstrings/issues/15) | +| [FirstRuneToUpper](https://godoc.org/github.com/huandu/xstrings#FirstRuneToUpper) | `String#capitalize` in Ruby; `ucfirst` in PHP or Perl | [#15](https://github.com/huandu/xstrings/issues/15) | +| [Insert](https://godoc.org/github.com/huandu/xstrings#Insert) | `String#insert` in Ruby | [#18](https://github.com/huandu/xstrings/issues/18) | +| [LastPartition](https://godoc.org/github.com/huandu/xstrings#LastPartition) | `str.rpartition` in Python; `String#rpartition` in Ruby | [#19](https://github.com/huandu/xstrings/issues/19) | +| [LeftJustify](https://godoc.org/github.com/huandu/xstrings#LeftJustify) | `str.ljust` in Python; 
`String#ljust` in Ruby | [#28](https://github.com/huandu/xstrings/issues/28) | +| [Len](https://godoc.org/github.com/huandu/xstrings#Len) | `mb_strlen` in PHP | [#23](https://github.com/huandu/xstrings/issues/23) | +| [Partition](https://godoc.org/github.com/huandu/xstrings#Partition) | `str.partition` in Python; `String#partition` in Ruby | [#10](https://github.com/huandu/xstrings/issues/10) | +| [Reverse](https://godoc.org/github.com/huandu/xstrings#Reverse) | `String#reverse` in Ruby; `strrev` in PHP; `reverse` in Perl | [#7](https://github.com/huandu/xstrings/issues/7) | +| [RightJustify](https://godoc.org/github.com/huandu/xstrings#RightJustify) | `str.rjust` in Python; `String#rjust` in Ruby | [#29](https://github.com/huandu/xstrings/issues/29) | +| [RuneWidth](https://godoc.org/github.com/huandu/xstrings#RuneWidth) | - | [#27](https://github.com/huandu/xstrings/issues/27) | +| [Scrub](https://godoc.org/github.com/huandu/xstrings#Scrub) | `String#scrub` in Ruby | [#20](https://github.com/huandu/xstrings/issues/20) | +| [Shuffle](https://godoc.org/github.com/huandu/xstrings#Shuffle) | `str_shuffle` in PHP | [#13](https://github.com/huandu/xstrings/issues/13) | +| [ShuffleSource](https://godoc.org/github.com/huandu/xstrings#ShuffleSource) | `str_shuffle` in PHP | [#13](https://github.com/huandu/xstrings/issues/13) | +| [Slice](https://godoc.org/github.com/huandu/xstrings#Slice) | `mb_substr` in PHP | [#9](https://github.com/huandu/xstrings/issues/9) | +| [Squeeze](https://godoc.org/github.com/huandu/xstrings#Squeeze) | `String#squeeze` in Ruby | [#11](https://github.com/huandu/xstrings/issues/11) | +| [Successor](https://godoc.org/github.com/huandu/xstrings#Successor) | `String#succ` or `String#next` in Ruby | [#22](https://github.com/huandu/xstrings/issues/22) | +| [SwapCase](https://godoc.org/github.com/huandu/xstrings#SwapCase) | `str.swapcase` in Python; `String#swapcase` in Ruby | [#12](https://github.com/huandu/xstrings/issues/12) | +| 
[ToCamelCase](https://godoc.org/github.com/huandu/xstrings#ToCamelCase) | `String#camelize` in RoR | [#1](https://github.com/huandu/xstrings/issues/1) | +| [ToKebab](https://godoc.org/github.com/huandu/xstrings#ToKebabCase) | - | [#41](https://github.com/huandu/xstrings/issues/41) | +| [ToSnakeCase](https://godoc.org/github.com/huandu/xstrings#ToSnakeCase) | `String#underscore` in RoR | [#1](https://github.com/huandu/xstrings/issues/1) | +| [Translate](https://godoc.org/github.com/huandu/xstrings#Translate) | `str.translate` in Python; `String#tr` in Ruby; `strtr` in PHP; `tr///` in Perl | [#21](https://github.com/huandu/xstrings/issues/21) | +| [Width](https://godoc.org/github.com/huandu/xstrings#Width) | `mb_strwidth` in PHP | [#26](https://github.com/huandu/xstrings/issues/26) | +| [WordCount](https://godoc.org/github.com/huandu/xstrings#WordCount) | `str_word_count` in PHP | [#14](https://github.com/huandu/xstrings/issues/14) | +| [WordSplit](https://godoc.org/github.com/huandu/xstrings#WordSplit) | - | [#14](https://github.com/huandu/xstrings/issues/14) | + +### Package `strings` functions ### + +*Keep this table sorted by Function in ascending order.* + +| Function | Friends | +| -------- | ------- | +| [Contains](http://golang.org/pkg/strings/#Contains) | `String#include?` in Ruby | +| [ContainsAny](http://golang.org/pkg/strings/#ContainsAny) | - | +| [ContainsRune](http://golang.org/pkg/strings/#ContainsRune) | - | +| [Count](http://golang.org/pkg/strings/#Count) | `str.count` in Python; `substr_count` in PHP | +| [EqualFold](http://golang.org/pkg/strings/#EqualFold) | `stricmp` in PHP; `String#casecmp` in Ruby | +| [Fields](http://golang.org/pkg/strings/#Fields) | `str.split` in Python; `split` in Perl; `String#split` in Ruby | +| [FieldsFunc](http://golang.org/pkg/strings/#FieldsFunc) | - | +| [HasPrefix](http://golang.org/pkg/strings/#HasPrefix) | `str.startswith` in Python; `String#start_with?` in Ruby | +| 
[HasSuffix](http://golang.org/pkg/strings/#HasSuffix) | `str.endswith` in Python; `String#end_with?` in Ruby | +| [Index](http://golang.org/pkg/strings/#Index) | `str.index` in Python; `String#index` in Ruby; `strpos` in PHP; `index` in Perl | +| [IndexAny](http://golang.org/pkg/strings/#IndexAny) | - | +| [IndexByte](http://golang.org/pkg/strings/#IndexByte) | - | +| [IndexFunc](http://golang.org/pkg/strings/#IndexFunc) | - | +| [IndexRune](http://golang.org/pkg/strings/#IndexRune) | - | +| [Join](http://golang.org/pkg/strings/#Join) | `str.join` in Python; `Array#join` in Ruby; `implode` in PHP; `join` in Perl | +| [LastIndex](http://golang.org/pkg/strings/#LastIndex) | `str.rindex` in Python; `String#rindex`; `strrpos` in PHP; `rindex` in Perl | +| [LastIndexAny](http://golang.org/pkg/strings/#LastIndexAny) | - | +| [LastIndexFunc](http://golang.org/pkg/strings/#LastIndexFunc) | - | +| [Map](http://golang.org/pkg/strings/#Map) | `String#each_codepoint` in Ruby | +| [Repeat](http://golang.org/pkg/strings/#Repeat) | operator `*` in Python and Ruby; `str_repeat` in PHP | +| [Replace](http://golang.org/pkg/strings/#Replace) | `str.replace` in Python; `String#sub` in Ruby; `str_replace` in PHP | +| [Split](http://golang.org/pkg/strings/#Split) | `str.split` in Python; `String#split` in Ruby; `explode` in PHP; `split` in Perl | +| [SplitAfter](http://golang.org/pkg/strings/#SplitAfter) | - | +| [SplitAfterN](http://golang.org/pkg/strings/#SplitAfterN) | - | +| [SplitN](http://golang.org/pkg/strings/#SplitN) | `str.split` in Python; `String#split` in Ruby; `explode` in PHP; `split` in Perl | +| [Title](http://golang.org/pkg/strings/#Title) | `str.title` in Python | +| [ToLower](http://golang.org/pkg/strings/#ToLower) | `str.lower` in Python; `String#downcase` in Ruby; `strtolower` in PHP; `lc` in Perl | +| [ToLowerSpecial](http://golang.org/pkg/strings/#ToLowerSpecial) | - | +| [ToTitle](http://golang.org/pkg/strings/#ToTitle) | - | +| 
[ToTitleSpecial](http://golang.org/pkg/strings/#ToTitleSpecial) | - | +| [ToUpper](http://golang.org/pkg/strings/#ToUpper) | `str.upper` in Python; `String#upcase` in Ruby; `strtoupper` in PHP; `uc` in Perl | +| [ToUpperSpecial](http://golang.org/pkg/strings/#ToUpperSpecial) | - | +| [Trim](http://golang.org/pkg/strings/#Trim) | `str.strip` in Python; `String#strip` in Ruby; `trim` in PHP | +| [TrimFunc](http://golang.org/pkg/strings/#TrimFunc) | - | +| [TrimLeft](http://golang.org/pkg/strings/#TrimLeft) | `str.lstrip` in Python; `String#lstrip` in Ruby; `ltrim` in PHP | +| [TrimLeftFunc](http://golang.org/pkg/strings/#TrimLeftFunc) | - | +| [TrimPrefix](http://golang.org/pkg/strings/#TrimPrefix) | - | +| [TrimRight](http://golang.org/pkg/strings/#TrimRight) | `str.rstrip` in Python; `String#rstrip` in Ruby; `rtrim` in PHP | +| [TrimRightFunc](http://golang.org/pkg/strings/#TrimRightFunc) | - | +| [TrimSpace](http://golang.org/pkg/strings/#TrimSpace) | `str.strip` in Python; `String#strip` in Ruby; `trim` in PHP | +| [TrimSuffix](http://golang.org/pkg/strings/#TrimSuffix) | `String#chomp` in Ruby; `chomp` in Perl | + +## License ## + +This library is licensed under MIT license. See LICENSE for details. diff --git a/vendor/github.com/huandu/xstrings/common.go b/vendor/github.com/huandu/xstrings/common.go new file mode 100644 index 000000000..f427cc84e --- /dev/null +++ b/vendor/github.com/huandu/xstrings/common.go @@ -0,0 +1,21 @@ +// Copyright 2015 Huan Du. All rights reserved. +// Licensed under the MIT license that can be found in the LICENSE file. + +package xstrings + +const bufferMaxInitGrowSize = 2048 + +// Lazy initialize a buffer. +func allocBuffer(orig, cur string) *stringBuilder { + output := &stringBuilder{} + maxSize := len(orig) * 4 + + // Avoid to reserve too much memory at once. 
+ if maxSize > bufferMaxInitGrowSize { + maxSize = bufferMaxInitGrowSize + } + + output.Grow(maxSize) + output.WriteString(orig[:len(orig)-len(cur)]) + return output +} diff --git a/vendor/github.com/huandu/xstrings/convert.go b/vendor/github.com/huandu/xstrings/convert.go new file mode 100644 index 000000000..3d5a34950 --- /dev/null +++ b/vendor/github.com/huandu/xstrings/convert.go @@ -0,0 +1,590 @@ +// Copyright 2015 Huan Du. All rights reserved. +// Licensed under the MIT license that can be found in the LICENSE file. + +package xstrings + +import ( + "math/rand" + "unicode" + "unicode/utf8" +) + +// ToCamelCase is to convert words separated by space, underscore and hyphen to camel case. +// +// Some samples. +// "some_words" => "SomeWords" +// "http_server" => "HttpServer" +// "no_https" => "NoHttps" +// "_complex__case_" => "_Complex_Case_" +// "some words" => "SomeWords" +func ToCamelCase(str string) string { + if len(str) == 0 { + return "" + } + + buf := &stringBuilder{} + var r0, r1 rune + var size int + + // leading connector will appear in output. + for len(str) > 0 { + r0, size = utf8.DecodeRuneInString(str) + str = str[size:] + + if !isConnector(r0) { + r0 = unicode.ToUpper(r0) + break + } + + buf.WriteRune(r0) + } + + if len(str) == 0 { + // A special case for a string contains only 1 rune. + if size != 0 { + buf.WriteRune(r0) + } + + return buf.String() + } + + for len(str) > 0 { + r1 = r0 + r0, size = utf8.DecodeRuneInString(str) + str = str[size:] + + if isConnector(r0) && isConnector(r1) { + buf.WriteRune(r1) + continue + } + + if isConnector(r1) { + r0 = unicode.ToUpper(r0) + } else { + r0 = unicode.ToLower(r0) + buf.WriteRune(r1) + } + } + + buf.WriteRune(r0) + return buf.String() +} + +// ToSnakeCase can convert all upper case characters in a string to +// snake case format. +// +// Some samples. 
+// "FirstName" => "first_name" +// "HTTPServer" => "http_server" +// "NoHTTPS" => "no_https" +// "GO_PATH" => "go_path" +// "GO PATH" => "go_path" // space is converted to underscore. +// "GO-PATH" => "go_path" // hyphen is converted to underscore. +// "http2xx" => "http_2xx" // insert an underscore before a number and after an alphabet. +// "HTTP20xOK" => "http_20x_ok" +// "Duration2m3s" => "duration_2m3s" +// "Bld4Floor3rd" => "bld4_floor_3rd" +func ToSnakeCase(str string) string { + return camelCaseToLowerCase(str, '_') +} + +// ToKebabCase can convert all upper case characters in a string to +// kebab case format. +// +// Some samples. +// "FirstName" => "first-name" +// "HTTPServer" => "http-server" +// "NoHTTPS" => "no-https" +// "GO_PATH" => "go-path" +// "GO PATH" => "go-path" // space is converted to '-'. +// "GO-PATH" => "go-path" // hyphen is converted to '-'. +// "http2xx" => "http-2xx" // insert an underscore before a number and after an alphabet. +// "HTTP20xOK" => "http-20x-ok" +// "Duration2m3s" => "duration-2m3s" +// "Bld4Floor3rd" => "bld4-floor-3rd" +func ToKebabCase(str string) string { + return camelCaseToLowerCase(str, '-') +} + +func camelCaseToLowerCase(str string, connector rune) string { + if len(str) == 0 { + return "" + } + + buf := &stringBuilder{} + wt, word, remaining := nextWord(str) + + for len(remaining) > 0 { + if wt != connectorWord { + toLower(buf, wt, word, connector) + } + + prev := wt + last := word + wt, word, remaining = nextWord(remaining) + + switch prev { + case numberWord: + for wt == alphabetWord || wt == numberWord { + toLower(buf, wt, word, connector) + wt, word, remaining = nextWord(remaining) + } + + if wt != invalidWord && wt != punctWord { + buf.WriteRune(connector) + } + + case connectorWord: + toLower(buf, prev, last, connector) + + case punctWord: + // nothing. 
+ + default: + if wt != numberWord { + if wt != connectorWord && wt != punctWord { + buf.WriteRune(connector) + } + + break + } + + if len(remaining) == 0 { + break + } + + last := word + wt, word, remaining = nextWord(remaining) + + // consider number as a part of previous word. + // e.g. "Bld4Floor" => "bld4_floor" + if wt != alphabetWord { + toLower(buf, numberWord, last, connector) + + if wt != connectorWord && wt != punctWord { + buf.WriteRune(connector) + } + + break + } + + // if there are some lower case letters following a number, + // add connector before the number. + // e.g. "HTTP2xx" => "http_2xx" + buf.WriteRune(connector) + toLower(buf, numberWord, last, connector) + + for wt == alphabetWord || wt == numberWord { + toLower(buf, wt, word, connector) + wt, word, remaining = nextWord(remaining) + } + + if wt != invalidWord && wt != connectorWord && wt != punctWord { + buf.WriteRune(connector) + } + } + } + + toLower(buf, wt, word, connector) + return buf.String() +} + +func isConnector(r rune) bool { + return r == '-' || r == '_' || unicode.IsSpace(r) +} + +type wordType int + +const ( + invalidWord wordType = iota + numberWord + upperCaseWord + alphabetWord + connectorWord + punctWord + otherWord +) + +func nextWord(str string) (wt wordType, word, remaining string) { + if len(str) == 0 { + return + } + + var offset int + remaining = str + r, size := nextValidRune(remaining, utf8.RuneError) + offset += size + + if r == utf8.RuneError { + wt = invalidWord + word = str[:offset] + remaining = str[offset:] + return + } + + switch { + case isConnector(r): + wt = connectorWord + remaining = remaining[size:] + + for len(remaining) > 0 { + r, size = nextValidRune(remaining, r) + + if !isConnector(r) { + break + } + + offset += size + remaining = remaining[size:] + } + + case unicode.IsPunct(r): + wt = punctWord + remaining = remaining[size:] + + for len(remaining) > 0 { + r, size = nextValidRune(remaining, r) + + if !unicode.IsPunct(r) { + break + } + + offset 
+= size + remaining = remaining[size:] + } + + case unicode.IsUpper(r): + wt = upperCaseWord + remaining = remaining[size:] + + if len(remaining) == 0 { + break + } + + r, size = nextValidRune(remaining, r) + + switch { + case unicode.IsUpper(r): + prevSize := size + offset += size + remaining = remaining[size:] + + for len(remaining) > 0 { + r, size = nextValidRune(remaining, r) + + if !unicode.IsUpper(r) { + break + } + + prevSize = size + offset += size + remaining = remaining[size:] + } + + // it's a bit complex when dealing with a case like "HTTPStatus". + // it's expected to be splitted into "HTTP" and "Status". + // Therefore "S" should be in remaining instead of word. + if len(remaining) > 0 && isAlphabet(r) { + offset -= prevSize + remaining = str[offset:] + } + + case isAlphabet(r): + offset += size + remaining = remaining[size:] + + for len(remaining) > 0 { + r, size = nextValidRune(remaining, r) + + if !isAlphabet(r) || unicode.IsUpper(r) { + break + } + + offset += size + remaining = remaining[size:] + } + } + + case isAlphabet(r): + wt = alphabetWord + remaining = remaining[size:] + + for len(remaining) > 0 { + r, size = nextValidRune(remaining, r) + + if !isAlphabet(r) || unicode.IsUpper(r) { + break + } + + offset += size + remaining = remaining[size:] + } + + case unicode.IsNumber(r): + wt = numberWord + remaining = remaining[size:] + + for len(remaining) > 0 { + r, size = nextValidRune(remaining, r) + + if !unicode.IsNumber(r) { + break + } + + offset += size + remaining = remaining[size:] + } + + default: + wt = otherWord + remaining = remaining[size:] + + for len(remaining) > 0 { + r, size = nextValidRune(remaining, r) + + if size == 0 || isConnector(r) || isAlphabet(r) || unicode.IsNumber(r) || unicode.IsPunct(r) { + break + } + + offset += size + remaining = remaining[size:] + } + } + + word = str[:offset] + return +} + +func nextValidRune(str string, prev rune) (r rune, size int) { + var sz int + + for len(str) > 0 { + r, sz = 
utf8.DecodeRuneInString(str) + size += sz + + if r != utf8.RuneError { + return + } + + str = str[sz:] + } + + r = prev + return +} + +func toLower(buf *stringBuilder, wt wordType, str string, connector rune) { + buf.Grow(buf.Len() + len(str)) + + if wt != upperCaseWord && wt != connectorWord { + buf.WriteString(str) + return + } + + for len(str) > 0 { + r, size := utf8.DecodeRuneInString(str) + str = str[size:] + + if isConnector(r) { + buf.WriteRune(connector) + } else if unicode.IsUpper(r) { + buf.WriteRune(unicode.ToLower(r)) + } else { + buf.WriteRune(r) + } + } +} + +// SwapCase will swap characters case from upper to lower or lower to upper. +func SwapCase(str string) string { + var r rune + var size int + + buf := &stringBuilder{} + + for len(str) > 0 { + r, size = utf8.DecodeRuneInString(str) + + switch { + case unicode.IsUpper(r): + buf.WriteRune(unicode.ToLower(r)) + + case unicode.IsLower(r): + buf.WriteRune(unicode.ToUpper(r)) + + default: + buf.WriteRune(r) + } + + str = str[size:] + } + + return buf.String() +} + +// FirstRuneToUpper converts first rune to upper case if necessary. +func FirstRuneToUpper(str string) string { + if str == "" { + return str + } + + r, size := utf8.DecodeRuneInString(str) + + if !unicode.IsLower(r) { + return str + } + + buf := &stringBuilder{} + buf.WriteRune(unicode.ToUpper(r)) + buf.WriteString(str[size:]) + return buf.String() +} + +// FirstRuneToLower converts first rune to lower case if necessary. +func FirstRuneToLower(str string) string { + if str == "" { + return str + } + + r, size := utf8.DecodeRuneInString(str) + + if !unicode.IsUpper(r) { + return str + } + + buf := &stringBuilder{} + buf.WriteRune(unicode.ToLower(r)) + buf.WriteString(str[size:]) + return buf.String() +} + +// Shuffle randomizes runes in a string and returns the result. +// It uses default random source in `math/rand`. 
+func Shuffle(str string) string { + if str == "" { + return str + } + + runes := []rune(str) + index := 0 + + for i := len(runes) - 1; i > 0; i-- { + index = rand.Intn(i + 1) + + if i != index { + runes[i], runes[index] = runes[index], runes[i] + } + } + + return string(runes) +} + +// ShuffleSource randomizes runes in a string with given random source. +func ShuffleSource(str string, src rand.Source) string { + if str == "" { + return str + } + + runes := []rune(str) + index := 0 + r := rand.New(src) + + for i := len(runes) - 1; i > 0; i-- { + index = r.Intn(i + 1) + + if i != index { + runes[i], runes[index] = runes[index], runes[i] + } + } + + return string(runes) +} + +// Successor returns the successor to string. +// +// If there is one alphanumeric rune is found in string, increase the rune by 1. +// If increment generates a "carry", the rune to the left of it is incremented. +// This process repeats until there is no carry, adding an additional rune if necessary. +// +// If there is no alphanumeric rune, the rightmost rune will be increased by 1 +// regardless whether the result is a valid rune or not. +// +// Only following characters are alphanumeric. 
+// * a - z +// * A - Z +// * 0 - 9 +// +// Samples (borrowed from ruby's String#succ document): +// "abcd" => "abce" +// "THX1138" => "THX1139" +// "<>" => "<>" +// "1999zzz" => "2000aaa" +// "ZZZ9999" => "AAAA0000" +// "***" => "**+" +func Successor(str string) string { + if str == "" { + return str + } + + var r rune + var i int + carry := ' ' + runes := []rune(str) + l := len(runes) + lastAlphanumeric := l + + for i = l - 1; i >= 0; i-- { + r = runes[i] + + if ('a' <= r && r <= 'y') || + ('A' <= r && r <= 'Y') || + ('0' <= r && r <= '8') { + runes[i]++ + carry = ' ' + lastAlphanumeric = i + break + } + + switch r { + case 'z': + runes[i] = 'a' + carry = 'a' + lastAlphanumeric = i + + case 'Z': + runes[i] = 'A' + carry = 'A' + lastAlphanumeric = i + + case '9': + runes[i] = '0' + carry = '0' + lastAlphanumeric = i + } + } + + // Needs to add one character for carry. + if i < 0 && carry != ' ' { + buf := &stringBuilder{} + buf.Grow(l + 4) // Reserve enough space for write. + + if lastAlphanumeric != 0 { + buf.WriteString(str[:lastAlphanumeric]) + } + + buf.WriteRune(carry) + + for _, r = range runes[lastAlphanumeric:] { + buf.WriteRune(r) + } + + return buf.String() + } + + // No alphanumeric character. Simply increase last rune's value. + if lastAlphanumeric == l { + runes[l-1]++ + } + + return string(runes) +} diff --git a/vendor/github.com/huandu/xstrings/count.go b/vendor/github.com/huandu/xstrings/count.go new file mode 100644 index 000000000..f96e38703 --- /dev/null +++ b/vendor/github.com/huandu/xstrings/count.go @@ -0,0 +1,120 @@ +// Copyright 2015 Huan Du. All rights reserved. +// Licensed under the MIT license that can be found in the LICENSE file. + +package xstrings + +import ( + "unicode" + "unicode/utf8" +) + +// Len returns str's utf8 rune length. +func Len(str string) int { + return utf8.RuneCountInString(str) +} + +// WordCount returns number of words in a string. 
+// +// Word is defined as a locale dependent string containing alphabetic characters, +// which may also contain but not start with `'` and `-` characters. +func WordCount(str string) int { + var r rune + var size, n int + + inWord := false + + for len(str) > 0 { + r, size = utf8.DecodeRuneInString(str) + + switch { + case isAlphabet(r): + if !inWord { + inWord = true + n++ + } + + case inWord && (r == '\'' || r == '-'): + // Still in word. + + default: + inWord = false + } + + str = str[size:] + } + + return n +} + +const minCJKCharacter = '\u3400' + +// Checks r is a letter but not CJK character. +func isAlphabet(r rune) bool { + if !unicode.IsLetter(r) { + return false + } + + switch { + // Quick check for non-CJK character. + case r < minCJKCharacter: + return true + + // Common CJK characters. + case r >= '\u4E00' && r <= '\u9FCC': + return false + + // Rare CJK characters. + case r >= '\u3400' && r <= '\u4D85': + return false + + // Rare and historic CJK characters. + case r >= '\U00020000' && r <= '\U0002B81D': + return false + } + + return true +} + +// Width returns string width in monotype font. +// Multi-byte characters are usually twice the width of single byte characters. +// +// Algorithm comes from `mb_strwidth` in PHP. +// http://php.net/manual/en/function.mb-strwidth.php +func Width(str string) int { + var r rune + var size, n int + + for len(str) > 0 { + r, size = utf8.DecodeRuneInString(str) + n += RuneWidth(r) + str = str[size:] + } + + return n +} + +// RuneWidth returns character width in monotype font. +// Multi-byte characters are usually twice the width of single byte characters. +// +// Algorithm comes from `mb_strwidth` in PHP. 
+// http://php.net/manual/en/function.mb-strwidth.php +func RuneWidth(r rune) int { + switch { + case r == utf8.RuneError || r < '\x20': + return 0 + + case '\x20' <= r && r < '\u2000': + return 1 + + case '\u2000' <= r && r < '\uFF61': + return 2 + + case '\uFF61' <= r && r < '\uFFA0': + return 1 + + case '\uFFA0' <= r: + return 2 + } + + return 0 +} diff --git a/vendor/github.com/huandu/xstrings/doc.go b/vendor/github.com/huandu/xstrings/doc.go new file mode 100644 index 000000000..1a6ef069f --- /dev/null +++ b/vendor/github.com/huandu/xstrings/doc.go @@ -0,0 +1,8 @@ +// Copyright 2015 Huan Du. All rights reserved. +// Licensed under the MIT license that can be found in the LICENSE file. + +// Package xstrings is to provide string algorithms which are useful but not included in `strings` package. +// See project home page for details. https://github.com/huandu/xstrings +// +// Package xstrings assumes all strings are encoded in utf8. +package xstrings diff --git a/vendor/github.com/huandu/xstrings/format.go b/vendor/github.com/huandu/xstrings/format.go new file mode 100644 index 000000000..8cd76c525 --- /dev/null +++ b/vendor/github.com/huandu/xstrings/format.go @@ -0,0 +1,169 @@ +// Copyright 2015 Huan Du. All rights reserved. +// Licensed under the MIT license that can be found in the LICENSE file. + +package xstrings + +import ( + "unicode/utf8" +) + +// ExpandTabs can expand tabs ('\t') rune in str to one or more spaces dpending on +// current column and tabSize. +// The column number is reset to zero after each newline ('\n') occurring in the str. +// +// ExpandTabs uses RuneWidth to decide rune's width. +// For example, CJK characters will be treated as two characters. +// +// If tabSize <= 0, ExpandTabs panics with error. 
+// +// Samples: +// ExpandTabs("a\tbc\tdef\tghij\tk", 4) => "a bc def ghij k" +// ExpandTabs("abcdefg\thij\nk\tl", 4) => "abcdefg hij\nk l" +// ExpandTabs("z中\t文\tw", 4) => "z中 文 w" +func ExpandTabs(str string, tabSize int) string { + if tabSize <= 0 { + panic("tab size must be positive") + } + + var r rune + var i, size, column, expand int + var output *stringBuilder + + orig := str + + for len(str) > 0 { + r, size = utf8.DecodeRuneInString(str) + + if r == '\t' { + expand = tabSize - column%tabSize + + if output == nil { + output = allocBuffer(orig, str) + } + + for i = 0; i < expand; i++ { + output.WriteRune(' ') + } + + column += expand + } else { + if r == '\n' { + column = 0 + } else { + column += RuneWidth(r) + } + + if output != nil { + output.WriteRune(r) + } + } + + str = str[size:] + } + + if output == nil { + return orig + } + + return output.String() +} + +// LeftJustify returns a string with pad string at right side if str's rune length is smaller than length. +// If str's rune length is larger than length, str itself will be returned. +// +// If pad is an empty string, str will be returned. +// +// Samples: +// LeftJustify("hello", 4, " ") => "hello" +// LeftJustify("hello", 10, " ") => "hello " +// LeftJustify("hello", 10, "123") => "hello12312" +func LeftJustify(str string, length int, pad string) string { + l := Len(str) + + if l >= length || pad == "" { + return str + } + + remains := length - l + padLen := Len(pad) + + output := &stringBuilder{} + output.Grow(len(str) + (remains/padLen+1)*len(pad)) + output.WriteString(str) + writePadString(output, pad, padLen, remains) + return output.String() +} + +// RightJustify returns a string with pad string at left side if str's rune length is smaller than length. +// If str's rune length is larger than length, str itself will be returned. +// +// If pad is an empty string, str will be returned. 
+// +// Samples: +// RightJustify("hello", 4, " ") => "hello" +// RightJustify("hello", 10, " ") => " hello" +// RightJustify("hello", 10, "123") => "12312hello" +func RightJustify(str string, length int, pad string) string { + l := Len(str) + + if l >= length || pad == "" { + return str + } + + remains := length - l + padLen := Len(pad) + + output := &stringBuilder{} + output.Grow(len(str) + (remains/padLen+1)*len(pad)) + writePadString(output, pad, padLen, remains) + output.WriteString(str) + return output.String() +} + +// Center returns a string with pad string at both side if str's rune length is smaller than length. +// If str's rune length is larger than length, str itself will be returned. +// +// If pad is an empty string, str will be returned. +// +// Samples: +// Center("hello", 4, " ") => "hello" +// Center("hello", 10, " ") => " hello " +// Center("hello", 10, "123") => "12hello123" +func Center(str string, length int, pad string) string { + l := Len(str) + + if l >= length || pad == "" { + return str + } + + remains := length - l + padLen := Len(pad) + + output := &stringBuilder{} + output.Grow(len(str) + (remains/padLen+1)*len(pad)) + writePadString(output, pad, padLen, remains/2) + output.WriteString(str) + writePadString(output, pad, padLen, (remains+1)/2) + return output.String() +} + +func writePadString(output *stringBuilder, pad string, padLen, remains int) { + var r rune + var size int + + repeats := remains / padLen + + for i := 0; i < repeats; i++ { + output.WriteString(pad) + } + + remains = remains % padLen + + if remains != 0 { + for i := 0; i < remains; i++ { + r, size = utf8.DecodeRuneInString(pad) + output.WriteRune(r) + pad = pad[size:] + } + } +} diff --git a/vendor/github.com/huandu/xstrings/manipulate.go b/vendor/github.com/huandu/xstrings/manipulate.go new file mode 100644 index 000000000..64075f9bb --- /dev/null +++ b/vendor/github.com/huandu/xstrings/manipulate.go @@ -0,0 +1,216 @@ +// Copyright 2015 Huan Du. 
All rights reserved. +// Licensed under the MIT license that can be found in the LICENSE file. + +package xstrings + +import ( + "strings" + "unicode/utf8" +) + +// Reverse a utf8 encoded string. +func Reverse(str string) string { + var size int + + tail := len(str) + buf := make([]byte, tail) + s := buf + + for len(str) > 0 { + _, size = utf8.DecodeRuneInString(str) + tail -= size + s = append(s[:tail], []byte(str[:size])...) + str = str[size:] + } + + return string(buf) +} + +// Slice a string by rune. +// +// Start must satisfy 0 <= start <= rune length. +// +// End can be positive, zero or negative. +// If end >= 0, start and end must satisfy start <= end <= rune length. +// If end < 0, it means slice to the end of string. +// +// Otherwise, Slice will panic as out of range. +func Slice(str string, start, end int) string { + var size, startPos, endPos int + + origin := str + + if start < 0 || end > len(str) || (end >= 0 && start > end) { + panic("out of range") + } + + if end >= 0 { + end -= start + } + + for start > 0 && len(str) > 0 { + _, size = utf8.DecodeRuneInString(str) + start-- + startPos += size + str = str[size:] + } + + if end < 0 { + return origin[startPos:] + } + + endPos = startPos + + for end > 0 && len(str) > 0 { + _, size = utf8.DecodeRuneInString(str) + end-- + endPos += size + str = str[size:] + } + + if len(str) == 0 && (start > 0 || end > 0) { + panic("out of range") + } + + return origin[startPos:endPos] +} + +// Partition splits a string by sep into three parts. +// The return value is a slice of strings with head, match and tail. 
+// +// If str contains sep, for example "hello" and "l", Partition returns +// "he", "l", "lo" +// +// If str doesn't contain sep, for example "hello" and "x", Partition returns +// "hello", "", "" +func Partition(str, sep string) (head, match, tail string) { + index := strings.Index(str, sep) + + if index == -1 { + head = str + return + } + + head = str[:index] + match = str[index : index+len(sep)] + tail = str[index+len(sep):] + return +} + +// LastPartition splits a string by last instance of sep into three parts. +// The return value is a slice of strings with head, match and tail. +// +// If str contains sep, for example "hello" and "l", LastPartition returns +// "hel", "l", "o" +// +// If str doesn't contain sep, for example "hello" and "x", LastPartition returns +// "", "", "hello" +func LastPartition(str, sep string) (head, match, tail string) { + index := strings.LastIndex(str, sep) + + if index == -1 { + tail = str + return + } + + head = str[:index] + match = str[index : index+len(sep)] + tail = str[index+len(sep):] + return +} + +// Insert src into dst at given rune index. +// Index is counted by runes instead of bytes. +// +// If index is out of range of dst, panic with out of range. +func Insert(dst, src string, index int) string { + return Slice(dst, 0, index) + src + Slice(dst, index, -1) +} + +// Scrub scrubs invalid utf8 bytes with repl string. +// Adjacent invalid bytes are replaced only once. 
+func Scrub(str, repl string) string { + var buf *stringBuilder + var r rune + var size, pos int + var hasError bool + + origin := str + + for len(str) > 0 { + r, size = utf8.DecodeRuneInString(str) + + if r == utf8.RuneError { + if !hasError { + if buf == nil { + buf = &stringBuilder{} + } + + buf.WriteString(origin[:pos]) + hasError = true + } + } else if hasError { + hasError = false + buf.WriteString(repl) + + origin = origin[pos:] + pos = 0 + } + + pos += size + str = str[size:] + } + + if buf != nil { + buf.WriteString(origin) + return buf.String() + } + + // No invalid byte. + return origin +} + +// WordSplit splits a string into words. Returns a slice of words. +// If there is no word in a string, return nil. +// +// Word is defined as a locale dependent string containing alphabetic characters, +// which may also contain but not start with `'` and `-` characters. +func WordSplit(str string) []string { + var word string + var words []string + var r rune + var size, pos int + + inWord := false + + for len(str) > 0 { + r, size = utf8.DecodeRuneInString(str) + + switch { + case isAlphabet(r): + if !inWord { + inWord = true + word = str + pos = 0 + } + + case inWord && (r == '\'' || r == '-'): + // Still in word. 
+ + default: + if inWord { + inWord = false + words = append(words, word[:pos]) + } + } + + pos += size + str = str[size:] + } + + if inWord { + words = append(words, word[:pos]) + } + + return words +} diff --git a/vendor/github.com/huandu/xstrings/stringbuilder.go b/vendor/github.com/huandu/xstrings/stringbuilder.go new file mode 100644 index 000000000..bb0919d32 --- /dev/null +++ b/vendor/github.com/huandu/xstrings/stringbuilder.go @@ -0,0 +1,7 @@ +//+build go1.10 + +package xstrings + +import "strings" + +type stringBuilder = strings.Builder diff --git a/vendor/github.com/huandu/xstrings/stringbuilder_go110.go b/vendor/github.com/huandu/xstrings/stringbuilder_go110.go new file mode 100644 index 000000000..dac389d13 --- /dev/null +++ b/vendor/github.com/huandu/xstrings/stringbuilder_go110.go @@ -0,0 +1,9 @@ +//+build !go1.10 + +package xstrings + +import "bytes" + +type stringBuilder struct { + bytes.Buffer +} diff --git a/vendor/github.com/huandu/xstrings/translate.go b/vendor/github.com/huandu/xstrings/translate.go new file mode 100644 index 000000000..42e694fb1 --- /dev/null +++ b/vendor/github.com/huandu/xstrings/translate.go @@ -0,0 +1,546 @@ +// Copyright 2015 Huan Du. All rights reserved. +// Licensed under the MIT license that can be found in the LICENSE file. + +package xstrings + +import ( + "unicode" + "unicode/utf8" +) + +type runeRangeMap struct { + FromLo rune // Lower bound of range map. + FromHi rune // An inclusive higher bound of range map. + ToLo rune + ToHi rune +} + +type runeDict struct { + Dict [unicode.MaxASCII + 1]rune +} + +type runeMap map[rune]rune + +// Translator can translate string with pre-compiled from and to patterns. +// If a from/to pattern pair needs to be used more than once, it's recommended +// to create a Translator and reuse it. +type Translator struct { + quickDict *runeDict // A quick dictionary to look up rune by index. Only available for latin runes. + runeMap runeMap // Rune map for translation. 
+ ranges []*runeRangeMap // Ranges of runes. + mappedRune rune // If mappedRune >= 0, all matched runes are translated to the mappedRune. + reverted bool // If to pattern is empty, all matched characters will be deleted. + hasPattern bool +} + +// NewTranslator creates new Translator through a from/to pattern pair. +func NewTranslator(from, to string) *Translator { + tr := &Translator{} + + if from == "" { + return tr + } + + reverted := from[0] == '^' + deletion := len(to) == 0 + + if reverted { + from = from[1:] + } + + var fromStart, fromEnd, fromRangeStep rune + var toStart, toEnd, toRangeStep rune + var fromRangeSize, toRangeSize rune + var singleRunes []rune + + // Update the to rune range. + updateRange := func() { + // No more rune to read in the to rune pattern. + if toEnd == utf8.RuneError { + return + } + + if toRangeStep == 0 { + to, toStart, toEnd, toRangeStep = nextRuneRange(to, toEnd) + return + } + + // Current range is not empty. Consume 1 rune from start. + if toStart != toEnd { + toStart += toRangeStep + return + } + + // No more rune. Repeat the last rune. + if to == "" { + toEnd = utf8.RuneError + return + } + + // Both start and end are used. Read two more runes from the to pattern. + to, toStart, toEnd, toRangeStep = nextRuneRange(to, utf8.RuneError) + } + + if deletion { + toStart = utf8.RuneError + toEnd = utf8.RuneError + } else { + // If from pattern is reverted, only the last rune in the to pattern will be used. + if reverted { + var size int + + for len(to) > 0 { + toStart, size = utf8.DecodeRuneInString(to) + to = to[size:] + } + + toEnd = utf8.RuneError + } else { + to, toStart, toEnd, toRangeStep = nextRuneRange(to, utf8.RuneError) + } + } + + fromEnd = utf8.RuneError + + for len(from) > 0 { + from, fromStart, fromEnd, fromRangeStep = nextRuneRange(from, fromEnd) + + // fromStart is a single character. Just map it with a rune in the to pattern. 
+ if fromRangeStep == 0 { + singleRunes = tr.addRune(fromStart, toStart, singleRunes) + updateRange() + continue + } + + for toEnd != utf8.RuneError && fromStart != fromEnd { + // If mapped rune is a single character instead of a range, simply shift first + // rune in the range. + if toRangeStep == 0 { + singleRunes = tr.addRune(fromStart, toStart, singleRunes) + updateRange() + fromStart += fromRangeStep + continue + } + + fromRangeSize = (fromEnd - fromStart) * fromRangeStep + toRangeSize = (toEnd - toStart) * toRangeStep + + // Not enough runes in the to pattern. Need to read more. + if fromRangeSize > toRangeSize { + fromStart, toStart = tr.addRuneRange(fromStart, fromStart+toRangeSize*fromRangeStep, toStart, toEnd, singleRunes) + fromStart += fromRangeStep + updateRange() + + // Edge case: If fromRangeSize == toRangeSize + 1, the last fromStart value needs be considered + // as a single rune. + if fromStart == fromEnd { + singleRunes = tr.addRune(fromStart, toStart, singleRunes) + updateRange() + } + + continue + } + + fromStart, toStart = tr.addRuneRange(fromStart, fromEnd, toStart, toStart+fromRangeSize*toRangeStep, singleRunes) + updateRange() + break + } + + if fromStart == fromEnd { + fromEnd = utf8.RuneError + continue + } + + _, toStart = tr.addRuneRange(fromStart, fromEnd, toStart, toStart, singleRunes) + fromEnd = utf8.RuneError + } + + if fromEnd != utf8.RuneError { + tr.addRune(fromEnd, toStart, singleRunes) + } + + tr.reverted = reverted + tr.mappedRune = -1 + tr.hasPattern = true + + // Translate RuneError only if in deletion or reverted mode. 
+ if deletion || reverted { + tr.mappedRune = toStart + } + + return tr +} + +func (tr *Translator) addRune(from, to rune, singleRunes []rune) []rune { + if from <= unicode.MaxASCII { + if tr.quickDict == nil { + tr.quickDict = &runeDict{} + } + + tr.quickDict.Dict[from] = to + } else { + if tr.runeMap == nil { + tr.runeMap = make(runeMap) + } + + tr.runeMap[from] = to + } + + singleRunes = append(singleRunes, from) + return singleRunes +} + +func (tr *Translator) addRuneRange(fromLo, fromHi, toLo, toHi rune, singleRunes []rune) (rune, rune) { + var r rune + var rrm *runeRangeMap + + if fromLo < fromHi { + rrm = &runeRangeMap{ + FromLo: fromLo, + FromHi: fromHi, + ToLo: toLo, + ToHi: toHi, + } + } else { + rrm = &runeRangeMap{ + FromLo: fromHi, + FromHi: fromLo, + ToLo: toHi, + ToHi: toLo, + } + } + + // If there is any single rune conflicts with this rune range, clear single rune record. + for _, r = range singleRunes { + if rrm.FromLo <= r && r <= rrm.FromHi { + if r <= unicode.MaxASCII { + tr.quickDict.Dict[r] = 0 + } else { + delete(tr.runeMap, r) + } + } + } + + tr.ranges = append(tr.ranges, rrm) + return fromHi, toHi +} + +func nextRuneRange(str string, last rune) (remaining string, start, end rune, rangeStep rune) { + var r rune + var size int + + remaining = str + escaping := false + isRange := false + + for len(remaining) > 0 { + r, size = utf8.DecodeRuneInString(remaining) + remaining = remaining[size:] + + // Parse special characters. + if !escaping { + if r == '\\' { + escaping = true + continue + } + + if r == '-' { + // Ignore slash at beginning of string. + if last == utf8.RuneError { + continue + } + + start = last + isRange = true + continue + } + } + + escaping = false + + if last != utf8.RuneError { + // This is a range which start and end are the same. + // Considier it as a normal character. 
+ if isRange && last == r { + isRange = false + continue + } + + start = last + end = r + + if isRange { + if start < end { + rangeStep = 1 + } else { + rangeStep = -1 + } + } + + return + } + + last = r + } + + start = last + end = utf8.RuneError + return +} + +// Translate str with a from/to pattern pair. +// +// See comment in Translate function for usage and samples. +func (tr *Translator) Translate(str string) string { + if !tr.hasPattern || str == "" { + return str + } + + var r rune + var size int + var needTr bool + + orig := str + + var output *stringBuilder + + for len(str) > 0 { + r, size = utf8.DecodeRuneInString(str) + r, needTr = tr.TranslateRune(r) + + if needTr && output == nil { + output = allocBuffer(orig, str) + } + + if r != utf8.RuneError && output != nil { + output.WriteRune(r) + } + + str = str[size:] + } + + // No character is translated. + if output == nil { + return orig + } + + return output.String() +} + +// TranslateRune return translated rune and true if r matches the from pattern. +// If r doesn't match the pattern, original r is returned and translated is false. 
+func (tr *Translator) TranslateRune(r rune) (result rune, translated bool) { + switch { + case tr.quickDict != nil: + if r <= unicode.MaxASCII { + result = tr.quickDict.Dict[r] + + if result != 0 { + translated = true + + if tr.mappedRune >= 0 { + result = tr.mappedRune + } + + break + } + } + + fallthrough + + case tr.runeMap != nil: + var ok bool + + if result, ok = tr.runeMap[r]; ok { + translated = true + + if tr.mappedRune >= 0 { + result = tr.mappedRune + } + + break + } + + fallthrough + + default: + var rrm *runeRangeMap + ranges := tr.ranges + + for i := len(ranges) - 1; i >= 0; i-- { + rrm = ranges[i] + + if rrm.FromLo <= r && r <= rrm.FromHi { + translated = true + + if tr.mappedRune >= 0 { + result = tr.mappedRune + break + } + + if rrm.ToLo < rrm.ToHi { + result = rrm.ToLo + r - rrm.FromLo + } else if rrm.ToLo > rrm.ToHi { + // ToHi can be smaller than ToLo if range is from higher to lower. + result = rrm.ToLo - r + rrm.FromLo + } else { + result = rrm.ToLo + } + + break + } + } + } + + if tr.reverted { + if !translated { + result = tr.mappedRune + } + + translated = !translated + } + + if !translated { + result = r + } + + return +} + +// HasPattern returns true if Translator has one pattern at least. +func (tr *Translator) HasPattern() bool { + return tr.hasPattern +} + +// Translate str with the characters defined in from replaced by characters defined in to. +// +// From and to are patterns representing a set of characters. Pattern is defined as following. +// +// * Special characters +// * '-' means a range of runes, e.g. +// * "a-z" means all characters from 'a' to 'z' inclusive; +// * "z-a" means all characters from 'z' to 'a' inclusive. +// * '^' as first character means a set of all runes excepted listed, e.g. +// * "^a-z" means all characters except 'a' to 'z' inclusive. +// * '\' escapes special characters. +// * Normal character represents itself, e.g. "abc" is a set including 'a', 'b' and 'c'. 
+// +// Translate will try to find a 1:1 mapping from from to to. +// If to is smaller than from, last rune in to will be used to map "out of range" characters in from. +// +// Note that '^' only works in the from pattern. It will be considered as a normal character in the to pattern. +// +// If the to pattern is an empty string, Translate works exactly the same as Delete. +// +// Samples: +// Translate("hello", "aeiou", "12345") => "h2ll4" +// Translate("hello", "a-z", "A-Z") => "HELLO" +// Translate("hello", "z-a", "a-z") => "svool" +// Translate("hello", "aeiou", "*") => "h*ll*" +// Translate("hello", "^l", "*") => "**ll*" +// Translate("hello ^ world", `\^lo`, "*") => "he*** * w*r*d" +func Translate(str, from, to string) string { + tr := NewTranslator(from, to) + return tr.Translate(str) +} + +// Delete runes in str matching the pattern. +// Pattern is defined in Translate function. +// +// Samples: +// Delete("hello", "aeiou") => "hll" +// Delete("hello", "a-k") => "llo" +// Delete("hello", "^a-k") => "he" +func Delete(str, pattern string) string { + tr := NewTranslator(pattern, "") + return tr.Translate(str) +} + +// Count how many runes in str match the pattern. +// Pattern is defined in Translate function. +// +// Samples: +// Count("hello", "aeiou") => 3 +// Count("hello", "a-k") => 3 +// Count("hello", "^a-k") => 2 +func Count(str, pattern string) int { + if pattern == "" || str == "" { + return 0 + } + + var r rune + var size int + var matched bool + + tr := NewTranslator(pattern, "") + cnt := 0 + + for len(str) > 0 { + r, size = utf8.DecodeRuneInString(str) + str = str[size:] + + if _, matched = tr.TranslateRune(r); matched { + cnt++ + } + } + + return cnt +} + +// Squeeze deletes adjacent repeated runes in str. +// If pattern is not empty, only runes matching the pattern will be squeezed. 
+// +// Samples: +// Squeeze("hello", "") => "helo" +// Squeeze("hello", "m-z") => "hello" +// Squeeze("hello world", " ") => "hello world" +func Squeeze(str, pattern string) string { + var last, r rune + var size int + var skipSqueeze, matched bool + var tr *Translator + var output *stringBuilder + + orig := str + last = -1 + + if len(pattern) > 0 { + tr = NewTranslator(pattern, "") + } + + for len(str) > 0 { + r, size = utf8.DecodeRuneInString(str) + + // Need to squeeze the str. + if last == r && !skipSqueeze { + if tr != nil { + if _, matched = tr.TranslateRune(r); !matched { + skipSqueeze = true + } + } + + if output == nil { + output = allocBuffer(orig, str) + } + + if skipSqueeze { + output.WriteRune(r) + } + } else { + if output != nil { + output.WriteRune(r) + } + + last = r + skipSqueeze = false + } + + str = str[size:] + } + + if output == nil { + return orig + } + + return output.String() +} diff --git a/vendor/github.com/imdario/mergo/.deepsource.toml b/vendor/github.com/imdario/mergo/.deepsource.toml new file mode 100644 index 000000000..8a0681af8 --- /dev/null +++ b/vendor/github.com/imdario/mergo/.deepsource.toml @@ -0,0 +1,12 @@ +version = 1 + +test_patterns = [ + "*_test.go" +] + +[[analyzers]] +name = "go" +enabled = true + + [analyzers.meta] + import_path = "github.com/imdario/mergo" \ No newline at end of file diff --git a/vendor/github.com/imdario/mergo/.gitignore b/vendor/github.com/imdario/mergo/.gitignore new file mode 100644 index 000000000..529c3412b --- /dev/null +++ b/vendor/github.com/imdario/mergo/.gitignore @@ -0,0 +1,33 @@ +#### joe made this: http://goel.io/joe + +#### go #### +# Binaries for programs and plugins +*.exe +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 +.glide/ + +#### vim #### +# Swap +[._]*.s[a-v][a-z] +[._]*.sw[a-p] 
+[._]s[a-v][a-z] +[._]sw[a-p] + +# Session +Session.vim + +# Temporary +.netrwhist +*~ +# Auto-generated tag files +tags diff --git a/vendor/github.com/imdario/mergo/.travis.yml b/vendor/github.com/imdario/mergo/.travis.yml new file mode 100644 index 000000000..d324c43ba --- /dev/null +++ b/vendor/github.com/imdario/mergo/.travis.yml @@ -0,0 +1,12 @@ +language: go +arch: + - amd64 + - ppc64le +install: + - go get -t + - go get golang.org/x/tools/cmd/cover + - go get github.com/mattn/goveralls +script: + - go test -race -v ./... +after_script: + - $HOME/gopath/bin/goveralls -service=travis-ci -repotoken $COVERALLS_TOKEN diff --git a/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md b/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..469b44907 --- /dev/null +++ b/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md @@ -0,0 +1,46 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. 
+ +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at i@dario.im. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/imdario/mergo/LICENSE b/vendor/github.com/imdario/mergo/LICENSE new file mode 100644 index 000000000..686680298 --- /dev/null +++ b/vendor/github.com/imdario/mergo/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2013 Dario Castañé. All rights reserved. +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/imdario/mergo/README.md b/vendor/github.com/imdario/mergo/README.md new file mode 100644 index 000000000..7e6f7aeee --- /dev/null +++ b/vendor/github.com/imdario/mergo/README.md @@ -0,0 +1,235 @@ +# Mergo + + +[![GoDoc][3]][4] +[![GitHub release][5]][6] +[![GoCard][7]][8] +[![Build Status][1]][2] +[![Coverage Status][9]][10] +[![Sourcegraph][11]][12] +[![FOSSA Status][13]][14] +[![Become my sponsor][15]][16] + +[1]: https://travis-ci.org/imdario/mergo.png +[2]: https://travis-ci.org/imdario/mergo +[3]: https://godoc.org/github.com/imdario/mergo?status.svg +[4]: https://godoc.org/github.com/imdario/mergo +[5]: https://img.shields.io/github/release/imdario/mergo.svg +[6]: https://github.com/imdario/mergo/releases +[7]: https://goreportcard.com/badge/imdario/mergo +[8]: https://goreportcard.com/report/github.com/imdario/mergo +[9]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master +[10]: https://coveralls.io/github/imdario/mergo?branch=master +[11]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg +[12]: https://sourcegraph.com/github.com/imdario/mergo?badge +[13]: https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield 
+[14]: https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield +[15]: https://img.shields.io/github/sponsors/imdario +[16]: https://github.com/sponsors/imdario + +A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. + +Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). + +Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche. + +## Status + +It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, Microsoft, etc](https://github.com/imdario/mergo#mergo-in-the-wild). + +### Important note + +Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds support for go modules. + +Keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2), Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). I added an optional/variadic argument so that it won't break the existing code. + +If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0). + +### Donations + +If Mergo is useful to you, consider buying me a coffee, a beer, or making a monthly donation to allow me to keep building great free software. 
:heart_eyes: + +Buy Me a Coffee at ko-fi.com +Donate using Liberapay +Become my sponsor + +### Mergo in the wild + +- [cli/cli](https://github.com/cli/cli) +- [moby/moby](https://github.com/moby/moby) +- [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes) +- [vmware/dispatch](https://github.com/vmware/dispatch) +- [Shopify/themekit](https://github.com/Shopify/themekit) +- [imdario/zas](https://github.com/imdario/zas) +- [matcornic/hermes](https://github.com/matcornic/hermes) +- [OpenBazaar/openbazaar-go](https://github.com/OpenBazaar/openbazaar-go) +- [kataras/iris](https://github.com/kataras/iris) +- [michaelsauter/crane](https://github.com/michaelsauter/crane) +- [go-task/task](https://github.com/go-task/task) +- [sensu/uchiwa](https://github.com/sensu/uchiwa) +- [ory/hydra](https://github.com/ory/hydra) +- [sisatech/vcli](https://github.com/sisatech/vcli) +- [dairycart/dairycart](https://github.com/dairycart/dairycart) +- [projectcalico/felix](https://github.com/projectcalico/felix) +- [resin-os/balena](https://github.com/resin-os/balena) +- [go-kivik/kivik](https://github.com/go-kivik/kivik) +- [Telefonica/govice](https://github.com/Telefonica/govice) +- [supergiant/supergiant](supergiant/supergiant) +- [SergeyTsalkov/brooce](https://github.com/SergeyTsalkov/brooce) +- [soniah/dnsmadeeasy](https://github.com/soniah/dnsmadeeasy) +- [ohsu-comp-bio/funnel](https://github.com/ohsu-comp-bio/funnel) +- [EagerIO/Stout](https://github.com/EagerIO/Stout) +- [lynndylanhurley/defsynth-api](https://github.com/lynndylanhurley/defsynth-api) +- [russross/canvasassignments](https://github.com/russross/canvasassignments) +- [rdegges/cryptly-api](https://github.com/rdegges/cryptly-api) +- [casualjim/exeggutor](https://github.com/casualjim/exeggutor) +- [divshot/gitling](https://github.com/divshot/gitling) +- [RWJMurphy/gorl](https://github.com/RWJMurphy/gorl) +- [andrerocker/deploy42](https://github.com/andrerocker/deploy42) +- 
[elwinar/rambler](https://github.com/elwinar/rambler) +- [tmaiaroto/gopartman](https://github.com/tmaiaroto/gopartman) +- [jfbus/impressionist](https://github.com/jfbus/impressionist) +- [Jmeyering/zealot](https://github.com/Jmeyering/zealot) +- [godep-migrator/rigger-host](https://github.com/godep-migrator/rigger-host) +- [Dronevery/MultiwaySwitch-Go](https://github.com/Dronevery/MultiwaySwitch-Go) +- [thoas/picfit](https://github.com/thoas/picfit) +- [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server) +- [jnuthong/item_search](https://github.com/jnuthong/item_search) +- [bukalapak/snowboard](https://github.com/bukalapak/snowboard) +- [containerssh/containerssh](https://github.com/containerssh/containerssh) +- [goreleaser/goreleaser](https://github.com/goreleaser/goreleaser) +- [tjpnz/structbot](https://github.com/tjpnz/structbot) + +## Install + + go get github.com/imdario/mergo + + // use in your .go code + import ( + "github.com/imdario/mergo" + ) + +## Usage + +You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as [they are zero values](https://golang.org/ref/spec#The_zero_value) too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). + +```go +if err := mergo.Merge(&dst, src); err != nil { + // ... +} +``` + +Also, you can merge overwriting values using the transformer `WithOverride`. + +```go +if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil { + // ... +} +``` + +Additionally, you can map a `map[string]interface{}` to a struct (and otherwise, from struct to map), following the same restrictions as in `Merge()`. Keys are capitalized to find each corresponding exported field. + +```go +if err := mergo.Map(&dst, srcMap); err != nil { + // ... 
+} +``` + +Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as `map[string]interface{}`. They will be just assigned as values. + +Here is a nice example: + +```go +package main + +import ( + "fmt" + "github.com/imdario/mergo" +) + +type Foo struct { + A string + B int64 +} + +func main() { + src := Foo{ + A: "one", + B: 2, + } + dest := Foo{ + A: "two", + } + mergo.Merge(&dest, src) + fmt.Println(dest) + // Will print + // {two 2} +} +``` + +Note: if test are failing due missing package, please execute: + + go get gopkg.in/yaml.v3 + +### Transformers + +Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, `time.Time` is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero `time.Time`? + +```go +package main + +import ( + "fmt" + "github.com/imdario/mergo" + "reflect" + "time" +) + +type timeTransformer struct { +} + +func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { + if typ == reflect.TypeOf(time.Time{}) { + return func(dst, src reflect.Value) error { + if dst.CanSet() { + isZero := dst.MethodByName("IsZero") + result := isZero.Call([]reflect.Value{}) + if result[0].Bool() { + dst.Set(src) + } + } + return nil + } + } + return nil +} + +type Snapshot struct { + Time time.Time + // ... +} + +func main() { + src := Snapshot{time.Now()} + dest := Snapshot{} + mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{})) + fmt.Println(dest) + // Will print + // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 } +} +``` + +## Contact me + +If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario) + +## About + +Written by [Dario Castañé](http://dario.im). 
+ +## License + +[BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE). + + +[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_large) diff --git a/vendor/github.com/imdario/mergo/doc.go b/vendor/github.com/imdario/mergo/doc.go new file mode 100644 index 000000000..fcd985f99 --- /dev/null +++ b/vendor/github.com/imdario/mergo/doc.go @@ -0,0 +1,143 @@ +// Copyright 2013 Dario Castañé. All rights reserved. +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. + +Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). + +Status + +It is ready for production use. It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc. + +Important note + +Please keep in mind that a problematic PR broke 0.3.9. We reverted it in 0.3.10. We consider 0.3.10 as stable but not bug-free. . Also, this version adds suppot for go modules. + +Keep in mind that in 0.3.2, Mergo changed Merge() and Map() signatures to support transformers. We added an optional/variadic argument so that it won't break the existing code. + +If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with go get -u github.com/imdario/mergo. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0). 
+ +Install + +Do your usual installation procedure: + + go get github.com/imdario/mergo + + // use in your .go code + import ( + "github.com/imdario/mergo" + ) + +Usage + +You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as they are zero values too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). + + if err := mergo.Merge(&dst, src); err != nil { + // ... + } + +Also, you can merge overwriting values using the transformer WithOverride. + + if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil { + // ... + } + +Additionally, you can map a map[string]interface{} to a struct (and otherwise, from struct to map), following the same restrictions as in Merge(). Keys are capitalized to find each corresponding exported field. + + if err := mergo.Map(&dst, srcMap); err != nil { + // ... + } + +Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as map[string]interface{}. They will be just assigned as values. + +Here is a nice example: + + package main + + import ( + "fmt" + "github.com/imdario/mergo" + ) + + type Foo struct { + A string + B int64 + } + + func main() { + src := Foo{ + A: "one", + B: 2, + } + dest := Foo{ + A: "two", + } + mergo.Merge(&dest, src) + fmt.Println(dest) + // Will print + // {two 2} + } + +Transformers + +Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, time.Time is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero time.Time? 
+ + package main + + import ( + "fmt" + "github.com/imdario/mergo" + "reflect" + "time" + ) + + type timeTransformer struct { + } + + func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { + if typ == reflect.TypeOf(time.Time{}) { + return func(dst, src reflect.Value) error { + if dst.CanSet() { + isZero := dst.MethodByName("IsZero") + result := isZero.Call([]reflect.Value{}) + if result[0].Bool() { + dst.Set(src) + } + } + return nil + } + } + return nil + } + + type Snapshot struct { + Time time.Time + // ... + } + + func main() { + src := Snapshot{time.Now()} + dest := Snapshot{} + mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{})) + fmt.Println(dest) + // Will print + // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 } + } + +Contact me + +If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): https://twitter.com/im_dario + +About + +Written by Dario Castañé: https://da.rio.hn + +License + +BSD 3-Clause license, as Go language. + +*/ +package mergo diff --git a/vendor/github.com/imdario/mergo/map.go b/vendor/github.com/imdario/mergo/map.go new file mode 100644 index 000000000..a13a7ee46 --- /dev/null +++ b/vendor/github.com/imdario/mergo/map.go @@ -0,0 +1,178 @@ +// Copyright 2014 Dario Castañé. All rights reserved. +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Based on src/pkg/reflect/deepequal.go from official +// golang's stdlib. 
+ +package mergo + +import ( + "fmt" + "reflect" + "unicode" + "unicode/utf8" +) + +func changeInitialCase(s string, mapper func(rune) rune) string { + if s == "" { + return s + } + r, n := utf8.DecodeRuneInString(s) + return string(mapper(r)) + s[n:] +} + +func isExported(field reflect.StructField) bool { + r, _ := utf8.DecodeRuneInString(field.Name) + return r >= 'A' && r <= 'Z' +} + +// Traverses recursively both values, assigning src's fields values to dst. +// The map argument tracks comparisons that have already been seen, which allows +// short circuiting on recursive types. +func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { + overwrite := config.Overwrite + if dst.CanAddr() { + addr := dst.UnsafeAddr() + h := 17 * addr + seen := visited[h] + typ := dst.Type() + for p := seen; p != nil; p = p.next { + if p.ptr == addr && p.typ == typ { + return nil + } + } + // Remember, remember... + visited[h] = &visit{addr, typ, seen} + } + zeroValue := reflect.Value{} + switch dst.Kind() { + case reflect.Map: + dstMap := dst.Interface().(map[string]interface{}) + for i, n := 0, src.NumField(); i < n; i++ { + srcType := src.Type() + field := srcType.Field(i) + if !isExported(field) { + continue + } + fieldName := field.Name + fieldName = changeInitialCase(fieldName, unicode.ToLower) + if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v)) || overwrite) { + dstMap[fieldName] = src.Field(i).Interface() + } + } + case reflect.Ptr: + if dst.IsNil() { + v := reflect.New(dst.Type().Elem()) + dst.Set(v) + } + dst = dst.Elem() + fallthrough + case reflect.Struct: + srcMap := src.Interface().(map[string]interface{}) + for key := range srcMap { + config.overwriteWithEmptyValue = true + srcValue := srcMap[key] + fieldName := changeInitialCase(key, unicode.ToUpper) + dstElement := dst.FieldByName(fieldName) + if dstElement == zeroValue { + // We discard it because the field doesn't exist. 
+ continue + } + srcElement := reflect.ValueOf(srcValue) + dstKind := dstElement.Kind() + srcKind := srcElement.Kind() + if srcKind == reflect.Ptr && dstKind != reflect.Ptr { + srcElement = srcElement.Elem() + srcKind = reflect.TypeOf(srcElement.Interface()).Kind() + } else if dstKind == reflect.Ptr { + // Can this work? I guess it can't. + if srcKind != reflect.Ptr && srcElement.CanAddr() { + srcPtr := srcElement.Addr() + srcElement = reflect.ValueOf(srcPtr) + srcKind = reflect.Ptr + } + } + + if !srcElement.IsValid() { + continue + } + if srcKind == dstKind { + if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { + return + } + } else if dstKind == reflect.Interface && dstElement.Kind() == reflect.Interface { + if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { + return + } + } else if srcKind == reflect.Map { + if err = deepMap(dstElement, srcElement, visited, depth+1, config); err != nil { + return + } + } else { + return fmt.Errorf("type mismatch on %s field: found %v, expected %v", fieldName, srcKind, dstKind) + } + } + } + return +} + +// Map sets fields' values in dst from src. +// src can be a map with string keys or a struct. dst must be the opposite: +// if src is a map, dst must be a valid pointer to struct. If src is a struct, +// dst must be map[string]interface{}. +// It won't merge unexported (private) fields and will do recursively +// any exported field. +// If dst is a map, keys will be src fields' names in lower camel case. +// Missing key in src that doesn't match a field in dst will be skipped. This +// doesn't apply if dst is a map. +// This is separated method from Merge because it is cleaner and it keeps sane +// semantics: merging equal types, mapping different (restricted) types. +func Map(dst, src interface{}, opts ...func(*Config)) error { + return _map(dst, src, opts...) 
+} + +// MapWithOverwrite will do the same as Map except that non-empty dst attributes will be overridden by +// non-empty src attribute values. +// Deprecated: Use Map(…) with WithOverride +func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { + return _map(dst, src, append(opts, WithOverride)...) +} + +func _map(dst, src interface{}, opts ...func(*Config)) error { + if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { + return ErrNonPointerAgument + } + var ( + vDst, vSrc reflect.Value + err error + ) + config := &Config{} + + for _, opt := range opts { + opt(config) + } + + if vDst, vSrc, err = resolveValues(dst, src); err != nil { + return err + } + // To be friction-less, we redirect equal-type arguments + // to deepMerge. Only because arguments can be anything. + if vSrc.Kind() == vDst.Kind() { + return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) + } + switch vSrc.Kind() { + case reflect.Struct: + if vDst.Kind() != reflect.Map { + return ErrExpectedMapAsDestination + } + case reflect.Map: + if vDst.Kind() != reflect.Struct { + return ErrExpectedStructAsDestination + } + default: + return ErrNotSupported + } + return deepMap(vDst, vSrc, make(map[uintptr]*visit), 0, config) +} diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go new file mode 100644 index 000000000..8b4e2f47a --- /dev/null +++ b/vendor/github.com/imdario/mergo/merge.go @@ -0,0 +1,380 @@ +// Copyright 2013 Dario Castañé. All rights reserved. +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Based on src/pkg/reflect/deepequal.go from official +// golang's stdlib. 
+ +package mergo + +import ( + "fmt" + "reflect" +) + +func hasMergeableFields(dst reflect.Value) (exported bool) { + for i, n := 0, dst.NumField(); i < n; i++ { + field := dst.Type().Field(i) + if field.Anonymous && dst.Field(i).Kind() == reflect.Struct { + exported = exported || hasMergeableFields(dst.Field(i)) + } else if isExportedComponent(&field) { + exported = exported || len(field.PkgPath) == 0 + } + } + return +} + +func isExportedComponent(field *reflect.StructField) bool { + pkgPath := field.PkgPath + if len(pkgPath) > 0 { + return false + } + c := field.Name[0] + if 'a' <= c && c <= 'z' || c == '_' { + return false + } + return true +} + +type Config struct { + Overwrite bool + AppendSlice bool + TypeCheck bool + Transformers Transformers + overwriteWithEmptyValue bool + overwriteSliceWithEmptyValue bool + sliceDeepCopy bool + debug bool +} + +type Transformers interface { + Transformer(reflect.Type) func(dst, src reflect.Value) error +} + +// Traverses recursively both values, assigning src's fields values to dst. +// The map argument tracks comparisons that have already been seen, which allows +// short circuiting on recursive types. +func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { + overwrite := config.Overwrite + typeCheck := config.TypeCheck + overwriteWithEmptySrc := config.overwriteWithEmptyValue + overwriteSliceWithEmptySrc := config.overwriteSliceWithEmptyValue + sliceDeepCopy := config.sliceDeepCopy + + if !src.IsValid() { + return + } + if dst.CanAddr() { + addr := dst.UnsafeAddr() + h := 17 * addr + seen := visited[h] + typ := dst.Type() + for p := seen; p != nil; p = p.next { + if p.ptr == addr && p.typ == typ { + return nil + } + } + // Remember, remember... 
+ visited[h] = &visit{addr, typ, seen} + } + + if config.Transformers != nil && !isReflectNil(dst) && dst.IsValid() { + if fn := config.Transformers.Transformer(dst.Type()); fn != nil { + err = fn(dst, src) + return + } + } + + switch dst.Kind() { + case reflect.Struct: + if hasMergeableFields(dst) { + for i, n := 0, dst.NumField(); i < n; i++ { + if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, config); err != nil { + return + } + } + } else { + if dst.CanSet() && (isReflectNil(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) { + dst.Set(src) + } + } + case reflect.Map: + if dst.IsNil() && !src.IsNil() { + if dst.CanSet() { + dst.Set(reflect.MakeMap(dst.Type())) + } else { + dst = src + return + } + } + + if src.Kind() != reflect.Map { + if overwrite { + dst.Set(src) + } + return + } + + for _, key := range src.MapKeys() { + srcElement := src.MapIndex(key) + if !srcElement.IsValid() { + continue + } + dstElement := dst.MapIndex(key) + switch srcElement.Kind() { + case reflect.Chan, reflect.Func, reflect.Map, reflect.Interface, reflect.Slice: + if srcElement.IsNil() { + if overwrite { + dst.SetMapIndex(key, srcElement) + } + continue + } + fallthrough + default: + if !srcElement.CanInterface() { + continue + } + switch reflect.TypeOf(srcElement.Interface()).Kind() { + case reflect.Struct: + fallthrough + case reflect.Ptr: + fallthrough + case reflect.Map: + srcMapElm := srcElement + dstMapElm := dstElement + if srcMapElm.CanInterface() { + srcMapElm = reflect.ValueOf(srcMapElm.Interface()) + if dstMapElm.IsValid() { + dstMapElm = reflect.ValueOf(dstMapElm.Interface()) + } + } + if err = deepMerge(dstMapElm, srcMapElm, visited, depth+1, config); err != nil { + return + } + case reflect.Slice: + srcSlice := reflect.ValueOf(srcElement.Interface()) + + var dstSlice reflect.Value + if !dstElement.IsValid() || dstElement.IsNil() { + dstSlice = reflect.MakeSlice(srcSlice.Type(), 0, srcSlice.Len()) + } else { + dstSlice = 
reflect.ValueOf(dstElement.Interface()) + } + + if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy { + if typeCheck && srcSlice.Type() != dstSlice.Type() { + return fmt.Errorf("cannot override two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) + } + dstSlice = srcSlice + } else if config.AppendSlice { + if srcSlice.Type() != dstSlice.Type() { + return fmt.Errorf("cannot append two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) + } + dstSlice = reflect.AppendSlice(dstSlice, srcSlice) + } else if sliceDeepCopy { + i := 0 + for ; i < srcSlice.Len() && i < dstSlice.Len(); i++ { + srcElement := srcSlice.Index(i) + dstElement := dstSlice.Index(i) + + if srcElement.CanInterface() { + srcElement = reflect.ValueOf(srcElement.Interface()) + } + if dstElement.CanInterface() { + dstElement = reflect.ValueOf(dstElement.Interface()) + } + + if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { + return + } + } + + } + dst.SetMapIndex(key, dstSlice) + } + } + if dstElement.IsValid() && !isEmptyValue(dstElement) && (reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map || reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice) { + continue + } + + if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement)) { + if dst.IsNil() { + dst.Set(reflect.MakeMap(dst.Type())) + } + dst.SetMapIndex(key, srcElement) + } + } + case reflect.Slice: + if !dst.CanSet() { + break + } + if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy { + dst.Set(src) + } else if config.AppendSlice { + if src.Type() != dst.Type() { + return fmt.Errorf("cannot append two slice with different type (%s, %s)", src.Type(), dst.Type()) + } + 
dst.Set(reflect.AppendSlice(dst, src)) + } else if sliceDeepCopy { + for i := 0; i < src.Len() && i < dst.Len(); i++ { + srcElement := src.Index(i) + dstElement := dst.Index(i) + if srcElement.CanInterface() { + srcElement = reflect.ValueOf(srcElement.Interface()) + } + if dstElement.CanInterface() { + dstElement = reflect.ValueOf(dstElement.Interface()) + } + + if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { + return + } + } + } + case reflect.Ptr: + fallthrough + case reflect.Interface: + if isReflectNil(src) { + if overwriteWithEmptySrc && dst.CanSet() && src.Type().AssignableTo(dst.Type()) { + dst.Set(src) + } + break + } + + if src.Kind() != reflect.Interface { + if dst.IsNil() || (src.Kind() != reflect.Ptr && overwrite) { + if dst.CanSet() && (overwrite || isEmptyValue(dst)) { + dst.Set(src) + } + } else if src.Kind() == reflect.Ptr { + if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { + return + } + } else if dst.Elem().Type() == src.Type() { + if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil { + return + } + } else { + return ErrDifferentArgumentsTypes + } + break + } + + if dst.IsNil() || overwrite { + if dst.CanSet() && (overwrite || isEmptyValue(dst)) { + dst.Set(src) + } + break + } + + if dst.Elem().Kind() == src.Elem().Kind() { + if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { + return + } + break + } + default: + mustSet := (isEmptyValue(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) + if mustSet { + if dst.CanSet() { + dst.Set(src) + } else { + dst = src + } + } + } + + return +} + +// Merge will fill any empty for value type attributes on the dst struct using corresponding +// src attributes if they themselves are not empty. dst and src must be valid same-type structs +// and dst must be a pointer to struct. +// It won't merge unexported (private) fields and will do recursively any exported field. 
+func Merge(dst, src interface{}, opts ...func(*Config)) error { + return merge(dst, src, opts...) +} + +// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overridden by +// non-empty src attribute values. +// Deprecated: use Merge(…) with WithOverride +func MergeWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { + return merge(dst, src, append(opts, WithOverride)...) +} + +// WithTransformers adds transformers to merge, allowing to customize the merging of some types. +func WithTransformers(transformers Transformers) func(*Config) { + return func(config *Config) { + config.Transformers = transformers + } +} + +// WithOverride will make merge override non-empty dst attributes with non-empty src attributes values. +func WithOverride(config *Config) { + config.Overwrite = true +} + +// WithOverwriteWithEmptyValue will make merge override non empty dst attributes with empty src attributes values. +func WithOverwriteWithEmptyValue(config *Config) { + config.Overwrite = true + config.overwriteWithEmptyValue = true +} + +// WithOverrideEmptySlice will make merge override empty dst slice with empty src slice. +func WithOverrideEmptySlice(config *Config) { + config.overwriteSliceWithEmptyValue = true +} + +// WithAppendSlice will make merge append slices instead of overwriting it. +func WithAppendSlice(config *Config) { + config.AppendSlice = true +} + +// WithTypeCheck will make merge check types while overwriting it (must be used with WithOverride). +func WithTypeCheck(config *Config) { + config.TypeCheck = true +} + +// WithSliceDeepCopy will merge slice element one by one with Overwrite flag. 
+func WithSliceDeepCopy(config *Config) { + config.sliceDeepCopy = true + config.Overwrite = true +} + +func merge(dst, src interface{}, opts ...func(*Config)) error { + if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { + return ErrNonPointerAgument + } + var ( + vDst, vSrc reflect.Value + err error + ) + + config := &Config{} + + for _, opt := range opts { + opt(config) + } + + if vDst, vSrc, err = resolveValues(dst, src); err != nil { + return err + } + if vDst.Type() != vSrc.Type() { + return ErrDifferentArgumentsTypes + } + return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) +} + +// IsReflectNil is the reflect value provided nil +func isReflectNil(v reflect.Value) bool { + k := v.Kind() + switch k { + case reflect.Interface, reflect.Slice, reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr: + // Both interface and slice are nil if first word is 0. + // Both are always bigger than a word; assume flagIndir. + return v.IsNil() + default: + return false + } +} diff --git a/vendor/github.com/imdario/mergo/mergo.go b/vendor/github.com/imdario/mergo/mergo.go new file mode 100644 index 000000000..9fe362d47 --- /dev/null +++ b/vendor/github.com/imdario/mergo/mergo.go @@ -0,0 +1,78 @@ +// Copyright 2013 Dario Castañé. All rights reserved. +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Based on src/pkg/reflect/deepequal.go from official +// golang's stdlib. + +package mergo + +import ( + "errors" + "reflect" +) + +// Errors reported by Mergo when it finds invalid arguments. 
+var ( + ErrNilArguments = errors.New("src and dst must not be nil") + ErrDifferentArgumentsTypes = errors.New("src and dst must be of same type") + ErrNotSupported = errors.New("only structs, maps, and slices are supported") + ErrExpectedMapAsDestination = errors.New("dst was expected to be a map") + ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct") + ErrNonPointerAgument = errors.New("dst must be a pointer") +) + +// During deepMerge, must keep track of checks that are +// in progress. The comparison algorithm assumes that all +// checks in progress are true when it reencounters them. +// Visited are stored in a map indexed by 17 * a1 + a2; +type visit struct { + ptr uintptr + typ reflect.Type + next *visit +} + +// From src/pkg/encoding/json/encode.go. +func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + if v.IsNil() { + return true + } + return isEmptyValue(v.Elem()) + case reflect.Func: + return v.IsNil() + case reflect.Invalid: + return true + } + return false +} + +func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) { + if dst == nil || src == nil { + err = ErrNilArguments + return + } + vDst = reflect.ValueOf(dst).Elem() + if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map && vDst.Kind() != reflect.Slice { + err = ErrNotSupported + return + } + vSrc = reflect.ValueOf(src) + // We check if vSrc is a pointer to dereference it. 
+ if vSrc.Kind() == reflect.Ptr { + vSrc = vSrc.Elem() + } + return +} diff --git a/vendor/github.com/mitchellh/cli/LICENSE b/vendor/github.com/mitchellh/cli/LICENSE new file mode 100644 index 000000000..c33dcc7c9 --- /dev/null +++ b/vendor/github.com/mitchellh/cli/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. 
any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. 
under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. 
Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. 
If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. 
Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. 
Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. 
Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. 
If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/mitchellh/cli/Makefile b/vendor/github.com/mitchellh/cli/Makefile new file mode 100644 index 000000000..89c0a1209 --- /dev/null +++ b/vendor/github.com/mitchellh/cli/Makefile @@ -0,0 +1,17 @@ +TEST?=./... + +default: test + +# test runs the test suite and vets the code +test: + go list $(TEST) | xargs -n1 go test -timeout=60s -parallel=10 $(TESTARGS) + +# testrace runs the race checker +testrace: + go list $(TEST) | xargs -n1 go test -race $(TESTARGS) + +# updatedeps installs all the dependencies to run and build +updatedeps: + go mod download + +.PHONY: test testrace updatedeps diff --git a/vendor/github.com/mitchellh/cli/README.md b/vendor/github.com/mitchellh/cli/README.md new file mode 100644 index 000000000..d75ff863f --- /dev/null +++ b/vendor/github.com/mitchellh/cli/README.md @@ -0,0 +1,66 @@ +# Go CLI Library [![GoDoc](https://godoc.org/github.com/mitchellh/cli?status.png)](https://pkg.go.dev/github.com/mitchellh/cli) + +cli is a library for implementing command-line interfaces in Go. +cli is the library that powers the CLI for +[Packer](https://github.com/mitchellh/packer), +[Consul](https://github.com/hashicorp/consul), +[Vault](https://github.com/hashicorp/vault), +[Terraform](https://github.com/hashicorp/terraform), +[Nomad](https://github.com/hashicorp/nomad), and more. 
+ +## Features + +* Easy sub-command based CLIs: `cli foo`, `cli bar`, etc. + +* Support for nested subcommands such as `cli foo bar`. + +* Optional support for default subcommands so `cli` does something + other than error. + +* Support for shell autocompletion of subcommands, flags, and arguments + with callbacks in Go. You don't need to write any shell code. + +* Automatic help generation for listing subcommands. + +* Automatic help flag recognition of `-h`, `--help`, etc. + +* Automatic version flag recognition of `-v`, `--version`. + +* Helpers for interacting with the terminal, such as outputting information, + asking for input, etc. These are optional, you can always interact with the + terminal however you choose. + +* Use of Go interfaces/types makes augmenting various parts of the library a + piece of cake. + +## Example + +Below is a simple example of creating and running a CLI + +```go +package main + +import ( + "log" + "os" + + "github.com/mitchellh/cli" +) + +func main() { + c := cli.NewCLI("app", "1.0.0") + c.Args = os.Args[1:] + c.Commands = map[string]cli.CommandFactory{ + "foo": fooCommandFactory, + "bar": barCommandFactory, + } + + exitStatus, err := c.Run() + if err != nil { + log.Println(err) + } + + os.Exit(exitStatus) +} +``` + diff --git a/vendor/github.com/mitchellh/cli/autocomplete.go b/vendor/github.com/mitchellh/cli/autocomplete.go new file mode 100644 index 000000000..3bec6258f --- /dev/null +++ b/vendor/github.com/mitchellh/cli/autocomplete.go @@ -0,0 +1,43 @@ +package cli + +import ( + "github.com/posener/complete/cmd/install" +) + +// autocompleteInstaller is an interface to be implemented to perform the +// autocomplete installation and uninstallation with a CLI. +// +// This interface is not exported because it only exists for unit tests +// to be able to test that the installation is called properly. 
+type autocompleteInstaller interface { + Install(string) error + Uninstall(string) error +} + +// realAutocompleteInstaller uses the real install package to do the +// install/uninstall. +type realAutocompleteInstaller struct{} + +func (i *realAutocompleteInstaller) Install(cmd string) error { + return install.Install(cmd) +} + +func (i *realAutocompleteInstaller) Uninstall(cmd string) error { + return install.Uninstall(cmd) +} + +// mockAutocompleteInstaller is used for tests to record the install/uninstall. +type mockAutocompleteInstaller struct { + InstallCalled bool + UninstallCalled bool +} + +func (i *mockAutocompleteInstaller) Install(cmd string) error { + i.InstallCalled = true + return nil +} + +func (i *mockAutocompleteInstaller) Uninstall(cmd string) error { + i.UninstallCalled = true + return nil +} diff --git a/vendor/github.com/mitchellh/cli/cli.go b/vendor/github.com/mitchellh/cli/cli.go new file mode 100644 index 000000000..952053287 --- /dev/null +++ b/vendor/github.com/mitchellh/cli/cli.go @@ -0,0 +1,742 @@ +package cli + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "regexp" + "sort" + "strings" + "sync" + "text/template" + + "github.com/Masterminds/sprig/v3" + "github.com/armon/go-radix" + "github.com/posener/complete" +) + +// CLI contains the state necessary to run subcommands and parse the +// command line arguments. +// +// CLI also supports nested subcommands, such as "cli foo bar". To use +// nested subcommands, the key in the Commands mapping below contains the +// full subcommand. In this example, it would be "foo bar". +// +// If you use a CLI with nested subcommands, some semantics change due to +// ambiguities: +// +// * We use longest prefix matching to find a matching subcommand. This +// means if you register "foo bar" and the user executes "cli foo qux", +// the "foo" command will be executed with the arg "qux". It is up to +// you to handle these args. 
One option is to just return the special +// help return code `RunResultHelp` to display help and exit. +// +// * The help flag "-h" or "-help" will look at all args to determine +// the help function. For example: "otto apps list -h" will show the +// help for "apps list" but "otto apps -h" will show it for "apps". +// In the normal CLI, only the first subcommand is used. +// +// * The help flag will list any subcommands that a command takes +// as well as the command's help itself. If there are no subcommands, +// it will note this. If the CLI itself has no subcommands, this entire +// section is omitted. +// +// * Any parent commands that don't exist are automatically created as +// no-op commands that just show help for other subcommands. For example, +// if you only register "foo bar", then "foo" is automatically created. +// +type CLI struct { + // Args is the list of command-line arguments received excluding + // the name of the app. For example, if the command "./cli foo bar" + // was invoked, then Args should be []string{"foo", "bar"}. + Args []string + + // Commands is a mapping of subcommand names to a factory function + // for creating that Command implementation. If there is a command + // with a blank string "", then it will be used as the default command + // if no subcommand is specified. + // + // If the key has a space in it, this will create a nested subcommand. + // For example, if the key is "foo bar", then to access it our CLI + // must be accessed with "./cli foo bar". See the docs for CLI for + // notes on how this changes some other behavior of the CLI as well. + // + // The factory should be as cheap as possible, ideally only allocating + // a struct. The factory may be called multiple times in the course + // of a command execution and certain events such as help require the + // instantiation of all commands. Expensive initialization should be + // deferred to function calls within the interface implementation. 
+ Commands map[string]CommandFactory + + // HiddenCommands is a list of commands that are "hidden". Hidden + // commands are not given to the help function callback and do not + // show up in autocomplete. The values in the slice should be equivalent + // to the keys in the command map. + HiddenCommands []string + + // Name defines the name of the CLI. + Name string + + // Version of the CLI. + Version string + + // Autocomplete enables or disables subcommand auto-completion support. + // This is enabled by default when NewCLI is called. Otherwise, this + // must enabled explicitly. + // + // Autocomplete requires the "Name" option to be set on CLI. This name + // should be set exactly to the binary name that is autocompleted. + // + // Autocompletion is supported via the github.com/posener/complete + // library. This library supports bash, zsh and fish. To add support + // for other shells, please see that library. + // + // AutocompleteInstall and AutocompleteUninstall are the global flag + // names for installing and uninstalling the autocompletion handlers + // for the user's shell. The flag should omit the hyphen(s) in front of + // the value. Both single and double hyphens will automatically be supported + // for the flag name. These default to `autocomplete-install` and + // `autocomplete-uninstall` respectively. + // + // AutocompleteNoDefaultFlags is a boolean which controls if the default auto- + // complete flags like -help and -version are added to the output. + // + // AutocompleteGlobalFlags are a mapping of global flags for + // autocompletion. The help and version flags are automatically added. 
+ Autocomplete bool + AutocompleteInstall string + AutocompleteUninstall string + AutocompleteNoDefaultFlags bool + AutocompleteGlobalFlags complete.Flags + autocompleteInstaller autocompleteInstaller // For tests + + // HelpFunc is the function called to generate the generic help + // text that is shown if help must be shown for the CLI that doesn't + // pertain to a specific command. + HelpFunc HelpFunc + + // HelpWriter is used to print help text and version when requested. + // Defaults to os.Stderr for backwards compatibility. + // It is recommended that you set HelpWriter to os.Stdout, and + // ErrorWriter to os.Stderr. + HelpWriter io.Writer + + // ErrorWriter used to output errors when a command can not be run. + // Defaults to the value of HelpWriter for backwards compatibility. + // It is recommended that you set HelpWriter to os.Stdout, and + // ErrorWriter to os.Stderr. + ErrorWriter io.Writer + + //--------------------------------------------------------------- + // Internal fields set automatically + + once sync.Once + autocomplete *complete.Complete + commandTree *radix.Tree + commandNested bool + commandHidden map[string]struct{} + subcommand string + subcommandArgs []string + topFlags []string + + // These are true when special global flags are set. We can/should + // probably use a bitset for this one day. + isHelp bool + isVersion bool + isAutocompleteInstall bool + isAutocompleteUninstall bool +} + +// NewClI returns a new CLI instance with sensible defaults. +func NewCLI(app, version string) *CLI { + return &CLI{ + Name: app, + Version: version, + HelpFunc: BasicHelpFunc(app), + Autocomplete: true, + } + +} + +// IsHelp returns whether or not the help flag is present within the +// arguments. +func (c *CLI) IsHelp() bool { + c.once.Do(c.init) + return c.isHelp +} + +// IsVersion returns whether or not the version flag is present within the +// arguments. 
+func (c *CLI) IsVersion() bool { + c.once.Do(c.init) + return c.isVersion +} + +// Run runs the actual CLI based on the arguments given. +func (c *CLI) Run() (int, error) { + c.once.Do(c.init) + + // If this is a autocompletion request, satisfy it. This must be called + // first before anything else since its possible to be autocompleting + // -help or -version or other flags and we want to show completions + // and not actually write the help or version. + if c.Autocomplete && c.autocomplete.Complete() { + return 0, nil + } + + // Just show the version and exit if instructed. + if c.IsVersion() && c.Version != "" { + c.HelpWriter.Write([]byte(c.Version + "\n")) + return 0, nil + } + + // Just print the help when only '-h' or '--help' is passed. + if c.IsHelp() && c.Subcommand() == "" { + c.HelpWriter.Write([]byte(c.HelpFunc(c.helpCommands(c.Subcommand())) + "\n")) + return 0, nil + } + + // If we're attempting to install or uninstall autocomplete then handle + if c.Autocomplete { + // Autocomplete requires the "Name" to be set so that we know what + // command to setup the autocomplete on. + if c.Name == "" { + return 1, fmt.Errorf( + "internal error: CLI.Name must be specified for autocomplete to work") + } + + // If both install and uninstall flags are specified, then error + if c.isAutocompleteInstall && c.isAutocompleteUninstall { + return 1, fmt.Errorf( + "Either the autocomplete install or uninstall flag may " + + "be specified, but not both.") + } + + // If the install flag is specified, perform the install or uninstall + if c.isAutocompleteInstall { + if err := c.autocompleteInstaller.Install(c.Name); err != nil { + return 1, err + } + + return 0, nil + } + + if c.isAutocompleteUninstall { + if err := c.autocompleteInstaller.Uninstall(c.Name); err != nil { + return 1, err + } + + return 0, nil + } + } + + // Attempt to get the factory function for creating the command + // implementation. If the command is invalid or blank, it is an error. 
+ raw, ok := c.commandTree.Get(c.Subcommand()) + if !ok { + c.ErrorWriter.Write([]byte(c.HelpFunc(c.helpCommands(c.subcommandParent())) + "\n")) + return 127, nil + } + + command, err := raw.(CommandFactory)() + if err != nil { + return 1, err + } + + // If we've been instructed to just print the help, then print it + if c.IsHelp() { + c.commandHelp(c.HelpWriter, command) + return 0, nil + } + + // If there is an invalid flag, then error + if len(c.topFlags) > 0 { + c.ErrorWriter.Write([]byte( + "Invalid flags before the subcommand. If these flags are for\n" + + "the subcommand, please put them after the subcommand.\n\n")) + c.commandHelp(c.ErrorWriter, command) + return 1, nil + } + + code := command.Run(c.SubcommandArgs()) + if code == RunResultHelp { + // Requesting help + c.commandHelp(c.ErrorWriter, command) + return 1, nil + } + + return code, nil +} + +// Subcommand returns the subcommand that the CLI would execute. For +// example, a CLI from "--version version --help" would return a Subcommand +// of "version" +func (c *CLI) Subcommand() string { + c.once.Do(c.init) + return c.subcommand +} + +// SubcommandArgs returns the arguments that will be passed to the +// subcommand. +func (c *CLI) SubcommandArgs() []string { + c.once.Do(c.init) + return c.subcommandArgs +} + +// subcommandParent returns the parent of this subcommand, if there is one. +// If there isn't on, "" is returned. 
+func (c *CLI) subcommandParent() string { + // Get the subcommand, if it is "" alread just return + sub := c.Subcommand() + if sub == "" { + return sub + } + + // Clear any trailing spaces and find the last space + sub = strings.TrimRight(sub, " ") + idx := strings.LastIndex(sub, " ") + + if idx == -1 { + // No space means our parent is root + return "" + } + + return sub[:idx] +} + +func (c *CLI) init() { + if c.HelpFunc == nil { + c.HelpFunc = BasicHelpFunc("app") + + if c.Name != "" { + c.HelpFunc = BasicHelpFunc(c.Name) + } + } + + if c.HelpWriter == nil { + c.HelpWriter = os.Stderr + } + if c.ErrorWriter == nil { + c.ErrorWriter = c.HelpWriter + } + + // Build our hidden commands + if len(c.HiddenCommands) > 0 { + c.commandHidden = make(map[string]struct{}) + for _, h := range c.HiddenCommands { + c.commandHidden[h] = struct{}{} + } + } + + // Build our command tree + c.commandTree = radix.New() + c.commandNested = false + for k, v := range c.Commands { + k = strings.TrimSpace(k) + c.commandTree.Insert(k, v) + if strings.ContainsRune(k, ' ') { + c.commandNested = true + } + } + + // Go through the key and fill in any missing parent commands + if c.commandNested { + var walkFn radix.WalkFn + toInsert := make(map[string]struct{}) + walkFn = func(k string, raw interface{}) bool { + idx := strings.LastIndex(k, " ") + if idx == -1 { + // If there is no space, just ignore top level commands + return false + } + + // Trim up to that space so we can get the expected parent + k = k[:idx] + if _, ok := c.commandTree.Get(k); ok { + // Yay we have the parent! + return false + } + + // We're missing the parent, so let's insert this + toInsert[k] = struct{}{} + + // Call the walk function recursively so we check this one too + return walkFn(k, nil) + } + + // Walk! 
+ c.commandTree.Walk(walkFn) + + // Insert any that we're missing + for k := range toInsert { + var f CommandFactory = func() (Command, error) { + return &MockCommand{ + HelpText: "This command is accessed by using one of the subcommands below.", + RunResult: RunResultHelp, + }, nil + } + + c.commandTree.Insert(k, f) + } + } + + // Setup autocomplete if we have it enabled. We have to do this after + // the command tree is setup so we can use the radix tree to easily find + // all subcommands. + if c.Autocomplete { + c.initAutocomplete() + } + + // Process the args + c.processArgs() +} + +func (c *CLI) initAutocomplete() { + if c.AutocompleteInstall == "" { + c.AutocompleteInstall = defaultAutocompleteInstall + } + + if c.AutocompleteUninstall == "" { + c.AutocompleteUninstall = defaultAutocompleteUninstall + } + + if c.autocompleteInstaller == nil { + c.autocompleteInstaller = &realAutocompleteInstaller{} + } + + // We first set c.autocomplete to a noop autocompleter that outputs + // to nul so that we can detect if we're autocompleting or not. If we're + // not, then we do nothing. This saves a LOT of compute cycles since + // initAutoCompleteSub has to walk every command. + c.autocomplete = complete.New(c.Name, complete.Command{}) + c.autocomplete.Out = ioutil.Discard + if !c.autocomplete.Complete() { + return + } + + // Build the root command + cmd := c.initAutocompleteSub("") + + // For the root, we add the global flags to the "Flags". This way + // they don't show up on every command. + if !c.AutocompleteNoDefaultFlags { + cmd.Flags = map[string]complete.Predictor{ + "-" + c.AutocompleteInstall: complete.PredictNothing, + "-" + c.AutocompleteUninstall: complete.PredictNothing, + "-help": complete.PredictNothing, + "-version": complete.PredictNothing, + } + } + cmd.GlobalFlags = c.AutocompleteGlobalFlags + + c.autocomplete = complete.New(c.Name, cmd) +} + +// initAutocompleteSub creates the complete.Command for a subcommand with +// the given prefix. 
This will continue recursively for all subcommands. +// The prefix "" (empty string) can be used for the root command. +func (c *CLI) initAutocompleteSub(prefix string) complete.Command { + var cmd complete.Command + walkFn := func(k string, raw interface{}) bool { + // Ignore the empty key which can be present for default commands. + if k == "" { + return false + } + + // Keep track of the full key so that we can nest further if necessary + fullKey := k + + if len(prefix) > 0 { + // If we have a prefix, trim the prefix + 1 (for the space) + // Example: turns "sub one" to "one" with prefix "sub" + k = k[len(prefix)+1:] + } + + if idx := strings.Index(k, " "); idx >= 0 { + // If there is a space, we trim up to the space. This turns + // "sub sub2 sub3" into "sub". The prefix trim above will + // trim our current depth properly. + k = k[:idx] + } + + if _, ok := cmd.Sub[k]; ok { + // If we already tracked this subcommand then ignore + return false + } + + // If the command is hidden, don't record it at all + if _, ok := c.commandHidden[fullKey]; ok { + return false + } + + if cmd.Sub == nil { + cmd.Sub = complete.Commands(make(map[string]complete.Command)) + } + subCmd := c.initAutocompleteSub(fullKey) + + // Instantiate the command so that we can check if the command is + // a CommandAutocomplete implementation. If there is an error + // creating the command, we just ignore it since that will be caught + // later. + impl, err := raw.(CommandFactory)() + if err != nil { + impl = nil + } + + // Check if it implements ComandAutocomplete. 
If so, setup the autocomplete + if c, ok := impl.(CommandAutocomplete); ok { + subCmd.Args = c.AutocompleteArgs() + subCmd.Flags = c.AutocompleteFlags() + } + + cmd.Sub[k] = subCmd + return false + } + + walkPrefix := prefix + if walkPrefix != "" { + walkPrefix += " " + } + + c.commandTree.WalkPrefix(walkPrefix, walkFn) + return cmd +} + +func (c *CLI) commandHelp(out io.Writer, command Command) { + // Get the template to use + tpl := strings.TrimSpace(defaultHelpTemplate) + if t, ok := command.(CommandHelpTemplate); ok { + tpl = t.HelpTemplate() + } + if !strings.HasSuffix(tpl, "\n") { + tpl += "\n" + } + + // Parse it + t, err := template.New("root").Funcs(sprig.TxtFuncMap()).Parse(tpl) + if err != nil { + t = template.Must(template.New("root").Parse(fmt.Sprintf( + "Internal error! Failed to parse command help template: %s\n", err))) + } + + // Template data + data := map[string]interface{}{ + "Name": c.Name, + "SubcommandName": c.Subcommand(), + "Help": command.Help(), + } + + // Build subcommand list if we have it + var subcommandsTpl []map[string]interface{} + if c.commandNested { + // Get the matching keys + subcommands := c.helpCommands(c.Subcommand()) + keys := make([]string, 0, len(subcommands)) + for k := range subcommands { + keys = append(keys, k) + } + + // Sort the keys + sort.Strings(keys) + + // Figure out the padding length + var longest int + for _, k := range keys { + if v := len(k); v > longest { + longest = v + } + } + + // Go through and create their structures + subcommandsTpl = make([]map[string]interface{}, 0, len(subcommands)) + for _, k := range keys { + // Get the command + raw, ok := subcommands[k] + if !ok { + c.ErrorWriter.Write([]byte(fmt.Sprintf( + "Error getting subcommand %q", k))) + } + sub, err := raw() + if err != nil { + c.ErrorWriter.Write([]byte(fmt.Sprintf( + "Error instantiating %q: %s", k, err))) + } + + // Find the last space and make sure we only include that last part + name := k + if idx := strings.LastIndex(k, " "); 
idx > -1 { + name = name[idx+1:] + } + + subcommandsTpl = append(subcommandsTpl, map[string]interface{}{ + "Name": name, + "NameAligned": name + strings.Repeat(" ", longest-len(k)), + "Help": sub.Help(), + "Synopsis": sub.Synopsis(), + }) + } + } + data["Subcommands"] = subcommandsTpl + + // Write + err = t.Execute(out, data) + if err == nil { + return + } + + // An error, just output... + c.ErrorWriter.Write([]byte(fmt.Sprintf( + "Internal error rendering help: %s", err))) +} + +// helpCommands returns the subcommands for the HelpFunc argument. +// This will only contain immediate subcommands. +func (c *CLI) helpCommands(prefix string) map[string]CommandFactory { + // If our prefix isn't empty, make sure it ends in ' ' + if prefix != "" && prefix[len(prefix)-1] != ' ' { + prefix += " " + } + + // Get all the subkeys of this command + var keys []string + c.commandTree.WalkPrefix(prefix, func(k string, raw interface{}) bool { + // Ignore any sub-sub keys, i.e. "foo bar baz" when we want "foo bar" + if !strings.Contains(k[len(prefix):], " ") { + keys = append(keys, k) + } + + return false + }) + + // For each of the keys return that in the map + result := make(map[string]CommandFactory, len(keys)) + for _, k := range keys { + raw, ok := c.commandTree.Get(k) + if !ok { + // We just got it via WalkPrefix above, so we just panic + panic("not found: " + k) + } + + // If this is a hidden command, don't show it + if _, ok := c.commandHidden[k]; ok { + continue + } + + result[k] = raw.(CommandFactory) + } + + return result +} + +func (c *CLI) processArgs() { + for i, arg := range c.Args { + if arg == "--" { + break + } + + // Check for help flags. 
+ if arg == "-h" || arg == "-help" || arg == "--help" { + c.isHelp = true + continue + } + + // Check for autocomplete flags + if c.Autocomplete { + if arg == "-"+c.AutocompleteInstall || arg == "--"+c.AutocompleteInstall { + c.isAutocompleteInstall = true + continue + } + + if arg == "-"+c.AutocompleteUninstall || arg == "--"+c.AutocompleteUninstall { + c.isAutocompleteUninstall = true + continue + } + } + + if c.subcommand == "" { + // Check for version flags if not in a subcommand. + if arg == "-v" || arg == "-version" || arg == "--version" { + c.isVersion = true + continue + } + + if arg != "" && arg[0] == '-' { + // Record the arg... + c.topFlags = append(c.topFlags, arg) + } + } + + // If we didn't find a subcommand yet and this is the first non-flag + // argument, then this is our subcommand. + if c.subcommand == "" && arg != "" && arg[0] != '-' { + c.subcommand = arg + if c.commandNested { + // If the command has a space in it, then it is invalid. + // Set a blank command so that it fails. + if strings.ContainsRune(arg, ' ') { + c.subcommand = "" + return + } + + // Determine the argument we look to to end subcommands. + // We look at all arguments until one is a flag or has a space. + // This disallows commands like: ./cli foo "bar baz". An + // argument with a space is always an argument. A blank + // argument is always an argument. + j := 0 + for k, v := range c.Args[i:] { + if strings.ContainsRune(v, ' ') || v == "" || v[0] == '-' { + break + } + + j = i + k + 1 + } + + // Nested CLI, the subcommand is actually the entire + // arg list up to a flag that is still a valid subcommand. + searchKey := strings.Join(c.Args[i:j], " ") + k, _, ok := c.commandTree.LongestPrefix(searchKey) + if ok { + // k could be a prefix that doesn't contain the full + // command such as "foo" instead of "foobar", so we + // need to verify that we have an entire key. To do that, + // we look for an ending in a space or an end of string. 
+ reVerify := regexp.MustCompile(regexp.QuoteMeta(k) + `( |$)`) + if reVerify.MatchString(searchKey) { + c.subcommand = k + i += strings.Count(k, " ") + } + } + } + + // The remaining args the subcommand arguments + c.subcommandArgs = c.Args[i+1:] + } + } + + // If we never found a subcommand and support a default command, then + // switch to using that. + if c.subcommand == "" { + if _, ok := c.Commands[""]; ok { + args := c.topFlags + args = append(args, c.subcommandArgs...) + c.topFlags = nil + c.subcommandArgs = args + } + } +} + +// defaultAutocompleteInstall and defaultAutocompleteUninstall are the +// default values for the autocomplete install and uninstall flags. +const defaultAutocompleteInstall = "autocomplete-install" +const defaultAutocompleteUninstall = "autocomplete-uninstall" + +const defaultHelpTemplate = ` +{{.Help}}{{if gt (len .Subcommands) 0}} + +Subcommands: +{{- range $value := .Subcommands }} + {{ $value.NameAligned }} {{ $value.Synopsis }}{{ end }} +{{- end }} +` diff --git a/vendor/github.com/mitchellh/cli/command.go b/vendor/github.com/mitchellh/cli/command.go new file mode 100644 index 000000000..bed11faf5 --- /dev/null +++ b/vendor/github.com/mitchellh/cli/command.go @@ -0,0 +1,67 @@ +package cli + +import ( + "github.com/posener/complete" +) + +const ( + // RunResultHelp is a value that can be returned from Run to signal + // to the CLI to render the help output. + RunResultHelp = -18511 +) + +// A command is a runnable sub-command of a CLI. +type Command interface { + // Help should return long-form help text that includes the command-line + // usage, a brief few sentences explaining the function of the command, + // and the complete list of flags the command accepts. + Help() string + + // Run should run the actual command with the given CLI instance and + // command-line arguments. It should return the exit status when it is + // finished. 
+ // + // There are a handful of special exit codes this can return documented + // above that change behavior. + Run(args []string) int + + // Synopsis should return a one-line, short synopsis of the command. + // This should be less than 50 characters ideally. + Synopsis() string +} + +// CommandAutocomplete is an extension of Command that enables fine-grained +// autocompletion. Subcommand autocompletion will work even if this interface +// is not implemented. By implementing this interface, more advanced +// autocompletion is enabled. +type CommandAutocomplete interface { + // AutocompleteArgs returns the argument predictor for this command. + // If argument completion is not supported, this should return + // complete.PredictNothing. + AutocompleteArgs() complete.Predictor + + // AutocompleteFlags returns a mapping of supported flags and autocomplete + // options for this command. The map key for the Flags map should be the + // complete flag such as "-foo" or "--foo". + AutocompleteFlags() complete.Flags +} + +// CommandHelpTemplate is an extension of Command that also has a function +// for returning a template for the help rather than the help itself. In +// this scenario, both Help and HelpTemplate should be implemented. +// +// If CommandHelpTemplate isn't implemented, the Help is output as-is. +type CommandHelpTemplate interface { + // HelpTemplate is the template in text/template format to use for + // displaying the Help. The keys available are: + // + // * ".Help" - The help text itself + // * ".Subcommands" + // + HelpTemplate() string +} + +// CommandFactory is a type of function that is a factory for commands. +// We need a factory because we may need to setup some state on the +// struct that implements the command itself. 
+type CommandFactory func() (Command, error) diff --git a/vendor/github.com/mitchellh/cli/command_mock.go b/vendor/github.com/mitchellh/cli/command_mock.go new file mode 100644 index 000000000..7a584b7e9 --- /dev/null +++ b/vendor/github.com/mitchellh/cli/command_mock.go @@ -0,0 +1,63 @@ +package cli + +import ( + "github.com/posener/complete" +) + +// MockCommand is an implementation of Command that can be used for tests. +// It is publicly exported from this package in case you want to use it +// externally. +type MockCommand struct { + // Settable + HelpText string + RunResult int + SynopsisText string + + // Set by the command + RunCalled bool + RunArgs []string +} + +func (c *MockCommand) Help() string { + return c.HelpText +} + +func (c *MockCommand) Run(args []string) int { + c.RunCalled = true + c.RunArgs = args + + return c.RunResult +} + +func (c *MockCommand) Synopsis() string { + return c.SynopsisText +} + +// MockCommandAutocomplete is an implementation of CommandAutocomplete. +type MockCommandAutocomplete struct { + MockCommand + + // Settable + AutocompleteArgsValue complete.Predictor + AutocompleteFlagsValue complete.Flags +} + +func (c *MockCommandAutocomplete) AutocompleteArgs() complete.Predictor { + return c.AutocompleteArgsValue +} + +func (c *MockCommandAutocomplete) AutocompleteFlags() complete.Flags { + return c.AutocompleteFlagsValue +} + +// MockCommandHelpTemplate is an implementation of CommandHelpTemplate. 
+type MockCommandHelpTemplate struct { + MockCommand + + // Settable + HelpTemplateText string +} + +func (c *MockCommandHelpTemplate) HelpTemplate() string { + return c.HelpTemplateText +} diff --git a/vendor/github.com/mitchellh/cli/help.go b/vendor/github.com/mitchellh/cli/help.go new file mode 100644 index 000000000..f5ca58f59 --- /dev/null +++ b/vendor/github.com/mitchellh/cli/help.go @@ -0,0 +1,79 @@ +package cli + +import ( + "bytes" + "fmt" + "log" + "sort" + "strings" +) + +// HelpFunc is the type of the function that is responsible for generating +// the help output when the CLI must show the general help text. +type HelpFunc func(map[string]CommandFactory) string + +// BasicHelpFunc generates some basic help output that is usually good enough +// for most CLI applications. +func BasicHelpFunc(app string) HelpFunc { + return func(commands map[string]CommandFactory) string { + var buf bytes.Buffer + buf.WriteString(fmt.Sprintf( + "Usage: %s [--version] [--help] []\n\n", + app)) + buf.WriteString("Available commands are:\n") + + // Get the list of keys so we can sort them, and also get the maximum + // key length so they can be aligned properly. + keys := make([]string, 0, len(commands)) + maxKeyLen := 0 + for key := range commands { + if len(key) > maxKeyLen { + maxKeyLen = len(key) + } + + keys = append(keys, key) + } + sort.Strings(keys) + + for _, key := range keys { + commandFunc, ok := commands[key] + if !ok { + // This should never happen since we JUST built the list of + // keys. + panic("command not found: " + key) + } + + command, err := commandFunc() + if err != nil { + log.Printf("[ERR] cli: Command '%s' failed to load: %s", + key, err) + continue + } + + key = fmt.Sprintf("%s%s", key, strings.Repeat(" ", maxKeyLen-len(key))) + buf.WriteString(fmt.Sprintf(" %s %s\n", key, command.Synopsis())) + } + + return buf.String() + } +} + +// FilteredHelpFunc will filter the commands to only include the keys +// in the include parameter. 
+func FilteredHelpFunc(include []string, f HelpFunc) HelpFunc { + return func(commands map[string]CommandFactory) string { + set := make(map[string]struct{}) + for _, k := range include { + set[k] = struct{}{} + } + + filtered := make(map[string]CommandFactory) + for k, f := range commands { + if _, ok := set[k]; ok { + filtered[k] = f + } + } + + return f(filtered) + } +} diff --git a/vendor/github.com/mitchellh/cli/ui.go b/vendor/github.com/mitchellh/cli/ui.go new file mode 100644 index 000000000..a2d6f94f4 --- /dev/null +++ b/vendor/github.com/mitchellh/cli/ui.go @@ -0,0 +1,187 @@ +package cli + +import ( + "bufio" + "errors" + "fmt" + "io" + "os" + "os/signal" + "strings" + + "github.com/bgentry/speakeasy" + "github.com/mattn/go-isatty" +) + +// Ui is an interface for interacting with the terminal, or "interface" +// of a CLI. This abstraction doesn't have to be used, but helps provide +// a simple, layerable way to manage user interactions. +type Ui interface { + // Ask asks the user for input using the given query. The response is + // returned as the given string, or an error. + Ask(string) (string, error) + + // AskSecret asks the user for input using the given query, but does not echo + // the keystrokes to the terminal. + AskSecret(string) (string, error) + + // Output is called for normal standard output. + Output(string) + + // Info is called for information related to the previous output. + // In general this may be the exact same as Output, but this gives + // Ui implementors some flexibility with output formats. + Info(string) + + // Error is used for any error messages that might appear on standard + // error. + Error(string) + + // Warn is used for any warning messages that might appear on standard + // error. + Warn(string) +} + +// BasicUi is an implementation of Ui that just outputs to the given +// writer. This UI is not threadsafe by default, but you can wrap it +// in a ConcurrentUi to make it safe. 
+type BasicUi struct { + Reader io.Reader + Writer io.Writer + ErrorWriter io.Writer +} + +func (u *BasicUi) Ask(query string) (string, error) { + return u.ask(query, false) +} + +func (u *BasicUi) AskSecret(query string) (string, error) { + return u.ask(query, true) +} + +func (u *BasicUi) ask(query string, secret bool) (string, error) { + if _, err := fmt.Fprint(u.Writer, query+" "); err != nil { + return "", err + } + + // Register for interrupts so that we can catch it and immediately + // return... + sigCh := make(chan os.Signal, 1) + signal.Notify(sigCh, os.Interrupt) + defer signal.Stop(sigCh) + + // Ask for input in a go-routine so that we can ignore it. + errCh := make(chan error, 1) + lineCh := make(chan string, 1) + go func() { + var line string + var err error + if secret && isatty.IsTerminal(os.Stdin.Fd()) { + line, err = speakeasy.Ask("") + } else { + r := bufio.NewReader(u.Reader) + line, err = r.ReadString('\n') + } + if err != nil { + errCh <- err + return + } + + lineCh <- strings.TrimRight(line, "\r\n") + }() + + select { + case err := <-errCh: + return "", err + case line := <-lineCh: + return line, nil + case <-sigCh: + // Print a newline so that any further output starts properly + // on a new line. + fmt.Fprintln(u.Writer) + + return "", errors.New("interrupted") + } +} + +func (u *BasicUi) Error(message string) { + w := u.Writer + if u.ErrorWriter != nil { + w = u.ErrorWriter + } + + fmt.Fprint(w, message) + fmt.Fprint(w, "\n") +} + +func (u *BasicUi) Info(message string) { + u.Output(message) +} + +func (u *BasicUi) Output(message string) { + fmt.Fprint(u.Writer, message) + fmt.Fprint(u.Writer, "\n") +} + +func (u *BasicUi) Warn(message string) { + u.Error(message) +} + +// PrefixedUi is an implementation of Ui that prefixes messages. 
+type PrefixedUi struct { + AskPrefix string + AskSecretPrefix string + OutputPrefix string + InfoPrefix string + ErrorPrefix string + WarnPrefix string + Ui Ui +} + +func (u *PrefixedUi) Ask(query string) (string, error) { + if query != "" { + query = fmt.Sprintf("%s%s", u.AskPrefix, query) + } + + return u.Ui.Ask(query) +} + +func (u *PrefixedUi) AskSecret(query string) (string, error) { + if query != "" { + query = fmt.Sprintf("%s%s", u.AskSecretPrefix, query) + } + + return u.Ui.AskSecret(query) +} + +func (u *PrefixedUi) Error(message string) { + if message != "" { + message = fmt.Sprintf("%s%s", u.ErrorPrefix, message) + } + + u.Ui.Error(message) +} + +func (u *PrefixedUi) Info(message string) { + if message != "" { + message = fmt.Sprintf("%s%s", u.InfoPrefix, message) + } + + u.Ui.Info(message) +} + +func (u *PrefixedUi) Output(message string) { + if message != "" { + message = fmt.Sprintf("%s%s", u.OutputPrefix, message) + } + + u.Ui.Output(message) +} + +func (u *PrefixedUi) Warn(message string) { + if message != "" { + message = fmt.Sprintf("%s%s", u.WarnPrefix, message) + } + + u.Ui.Warn(message) +} diff --git a/vendor/github.com/mitchellh/cli/ui_colored.go b/vendor/github.com/mitchellh/cli/ui_colored.go new file mode 100644 index 000000000..b0ec44840 --- /dev/null +++ b/vendor/github.com/mitchellh/cli/ui_colored.go @@ -0,0 +1,73 @@ +package cli + +import ( + "github.com/fatih/color" +) + +const ( + noColor = -1 +) + +// UiColor is a posix shell color code to use. +type UiColor struct { + Code int + Bold bool +} + +// A list of colors that are useful. These are all non-bolded by default. 
+var ( + UiColorNone UiColor = UiColor{noColor, false} + UiColorRed = UiColor{int(color.FgHiRed), false} + UiColorGreen = UiColor{int(color.FgHiGreen), false} + UiColorYellow = UiColor{int(color.FgHiYellow), false} + UiColorBlue = UiColor{int(color.FgHiBlue), false} + UiColorMagenta = UiColor{int(color.FgHiMagenta), false} + UiColorCyan = UiColor{int(color.FgHiCyan), false} +) + +// ColoredUi is a Ui implementation that colors its output according +// to the given color schemes for the given type of output. +type ColoredUi struct { + OutputColor UiColor + InfoColor UiColor + ErrorColor UiColor + WarnColor UiColor + Ui Ui +} + +func (u *ColoredUi) Ask(query string) (string, error) { + return u.Ui.Ask(u.colorize(query, u.OutputColor)) +} + +func (u *ColoredUi) AskSecret(query string) (string, error) { + return u.Ui.AskSecret(u.colorize(query, u.OutputColor)) +} + +func (u *ColoredUi) Output(message string) { + u.Ui.Output(u.colorize(message, u.OutputColor)) +} + +func (u *ColoredUi) Info(message string) { + u.Ui.Info(u.colorize(message, u.InfoColor)) +} + +func (u *ColoredUi) Error(message string) { + u.Ui.Error(u.colorize(message, u.ErrorColor)) +} + +func (u *ColoredUi) Warn(message string) { + u.Ui.Warn(u.colorize(message, u.WarnColor)) +} + +func (u *ColoredUi) colorize(message string, uc UiColor) string { + if uc.Code == noColor { + return message + } + + attr := []color.Attribute{color.Attribute(uc.Code)} + if uc.Bold { + attr = append(attr, color.Bold) + } + + return color.New(attr...).SprintFunc()(message) +} diff --git a/vendor/github.com/mitchellh/cli/ui_concurrent.go b/vendor/github.com/mitchellh/cli/ui_concurrent.go new file mode 100644 index 000000000..b4f4dbfaa --- /dev/null +++ b/vendor/github.com/mitchellh/cli/ui_concurrent.go @@ -0,0 +1,54 @@ +package cli + +import ( + "sync" +) + +// ConcurrentUi is a wrapper around a Ui interface (and implements that +// interface) making the underlying Ui concurrency safe. 
+type ConcurrentUi struct { + Ui Ui + l sync.Mutex +} + +func (u *ConcurrentUi) Ask(query string) (string, error) { + u.l.Lock() + defer u.l.Unlock() + + return u.Ui.Ask(query) +} + +func (u *ConcurrentUi) AskSecret(query string) (string, error) { + u.l.Lock() + defer u.l.Unlock() + + return u.Ui.AskSecret(query) +} + +func (u *ConcurrentUi) Error(message string) { + u.l.Lock() + defer u.l.Unlock() + + u.Ui.Error(message) +} + +func (u *ConcurrentUi) Info(message string) { + u.l.Lock() + defer u.l.Unlock() + + u.Ui.Info(message) +} + +func (u *ConcurrentUi) Output(message string) { + u.l.Lock() + defer u.l.Unlock() + + u.Ui.Output(message) +} + +func (u *ConcurrentUi) Warn(message string) { + u.l.Lock() + defer u.l.Unlock() + + u.Ui.Warn(message) +} diff --git a/vendor/github.com/mitchellh/cli/ui_mock.go b/vendor/github.com/mitchellh/cli/ui_mock.go new file mode 100644 index 000000000..935f28a4a --- /dev/null +++ b/vendor/github.com/mitchellh/cli/ui_mock.go @@ -0,0 +1,116 @@ +package cli + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strings" + "sync" +) + +// NewMockUi returns a fully initialized MockUi instance +// which is safe for concurrent use. +func NewMockUi() *MockUi { + m := new(MockUi) + m.once.Do(m.init) + return m +} + +// MockUi is a mock UI that is used for tests and is exported publicly +// for use in external tests if needed as well. Do not instantite this +// directly since the buffers will be initialized on the first write. If +// there is no write then you will get a nil panic. Please use the +// NewMockUi() constructor function instead. 
You can fix your code with +// +// sed -i -e 's/new(cli.MockUi)/cli.NewMockUi()/g' *_test.go +type MockUi struct { + InputReader io.Reader + ErrorWriter *syncBuffer + OutputWriter *syncBuffer + + once sync.Once +} + +func (u *MockUi) Ask(query string) (string, error) { + u.once.Do(u.init) + + var result string + fmt.Fprint(u.OutputWriter, query) + r := bufio.NewReader(u.InputReader) + line, err := r.ReadString('\n') + if err != nil { + return "", err + } + result = strings.TrimRight(line, "\r\n") + + return result, nil +} + +func (u *MockUi) AskSecret(query string) (string, error) { + return u.Ask(query) +} + +func (u *MockUi) Error(message string) { + u.once.Do(u.init) + + fmt.Fprint(u.ErrorWriter, message) + fmt.Fprint(u.ErrorWriter, "\n") +} + +func (u *MockUi) Info(message string) { + u.Output(message) +} + +func (u *MockUi) Output(message string) { + u.once.Do(u.init) + + fmt.Fprint(u.OutputWriter, message) + fmt.Fprint(u.OutputWriter, "\n") +} + +func (u *MockUi) Warn(message string) { + u.once.Do(u.init) + + fmt.Fprint(u.ErrorWriter, message) + fmt.Fprint(u.ErrorWriter, "\n") +} + +func (u *MockUi) init() { + u.ErrorWriter = new(syncBuffer) + u.OutputWriter = new(syncBuffer) +} + +type syncBuffer struct { + sync.RWMutex + b bytes.Buffer +} + +func (b *syncBuffer) Write(data []byte) (int, error) { + b.Lock() + defer b.Unlock() + return b.b.Write(data) +} + +func (b *syncBuffer) Read(data []byte) (int, error) { + b.RLock() + defer b.RUnlock() + return b.b.Read(data) +} + +func (b *syncBuffer) Reset() { + b.Lock() + b.b.Reset() + b.Unlock() +} + +func (b *syncBuffer) String() string { + return string(b.Bytes()) +} + +func (b *syncBuffer) Bytes() []byte { + b.RLock() + data := b.b.Bytes() + b.RUnlock() + return data +} diff --git a/vendor/github.com/mitchellh/cli/ui_writer.go b/vendor/github.com/mitchellh/cli/ui_writer.go new file mode 100644 index 000000000..1e1db3cf6 --- /dev/null +++ b/vendor/github.com/mitchellh/cli/ui_writer.go @@ -0,0 +1,18 @@ +package cli 
+ +// UiWriter is an io.Writer implementation that can be used with +// loggers that writes every line of log output data to a Ui at the +// Info level. +type UiWriter struct { + Ui Ui +} + +func (w *UiWriter) Write(p []byte) (n int, err error) { + n = len(p) + if n > 0 && p[n-1] == '\n' { + p = p[:n-1] + } + + w.Ui.Info(string(p)) + return n, nil +} diff --git a/vendor/github.com/posener/complete/.gitignore b/vendor/github.com/posener/complete/.gitignore new file mode 100644 index 000000000..293955f99 --- /dev/null +++ b/vendor/github.com/posener/complete/.gitignore @@ -0,0 +1,4 @@ +.idea +coverage.txt +gocomplete/gocomplete +example/self/self diff --git a/vendor/github.com/posener/complete/.travis.yml b/vendor/github.com/posener/complete/.travis.yml new file mode 100644 index 000000000..6ba8d865b --- /dev/null +++ b/vendor/github.com/posener/complete/.travis.yml @@ -0,0 +1,16 @@ +language: go +go: + - tip + - 1.12.x + - 1.11.x + - 1.10.x + +script: + - go test -race -coverprofile=coverage.txt -covermode=atomic ./... 
+ +after_success: + - bash <(curl -s https://codecov.io/bash) + +matrix: + allow_failures: + - go: tip \ No newline at end of file diff --git a/vendor/github.com/posener/complete/LICENSE.txt b/vendor/github.com/posener/complete/LICENSE.txt new file mode 100644 index 000000000..16249b4a1 --- /dev/null +++ b/vendor/github.com/posener/complete/LICENSE.txt @@ -0,0 +1,21 @@ +The MIT License + +Copyright (c) 2017 Eyal Posener + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
\ No newline at end of file diff --git a/vendor/github.com/posener/complete/README.md b/vendor/github.com/posener/complete/README.md new file mode 100644 index 000000000..dcc6c8932 --- /dev/null +++ b/vendor/github.com/posener/complete/README.md @@ -0,0 +1,131 @@ +# complete + +[![Build Status](https://travis-ci.org/posener/complete.svg?branch=master)](https://travis-ci.org/posener/complete) +[![codecov](https://codecov.io/gh/posener/complete/branch/master/graph/badge.svg)](https://codecov.io/gh/posener/complete) +[![golangci](https://golangci.com/badges/github.com/posener/complete.svg)](https://golangci.com/r/github.com/posener/complete) +[![GoDoc](https://godoc.org/github.com/posener/complete?status.svg)](http://godoc.org/github.com/posener/complete) +[![goreadme](https://goreadme.herokuapp.com/badge/posener/complete.svg)](https://goreadme.herokuapp.com) + +Package complete provides a tool for bash writing bash completion in go, and bash completion for the go command line. + +Writing bash completion scripts is a hard work. This package provides an easy way +to create bash completion scripts for any command, and also an easy way to install/uninstall +the completion of the command. + +#### Go Command Bash Completion + +In [./cmd/gocomplete](./cmd/gocomplete) there is an example for bash completion for the `go` command line. + +This is an example that uses the `complete` package on the `go` command - the `complete` package +can also be used to implement any completions, see #usage. + +#### Install + +1. Type in your shell: + +```go +go get -u github.com/posener/complete/gocomplete +gocomplete -install +``` + +2. Restart your shell + +Uninstall by `gocomplete -uninstall` + +#### Features + +- Complete `go` command, including sub commands and all flags. +- Complete packages names or `.go` files when necessary. +- Complete test names after `-run` flag. 
+ +#### Complete package + +Supported shells: + +- [x] bash +- [x] zsh +- [x] fish + +#### Usage + +Assuming you have program called `run` and you want to have bash completion +for it, meaning, if you type `run` then space, then press the `Tab` key, +the shell will suggest relevant complete options. + +In that case, we will create a program called `runcomplete`, a go program, +with a `func main()` and so, that will make the completion of the `run` +program. Once the `runcomplete` will be in a binary form, we could +`runcomplete -install` and that will add to our shell all the bash completion +options for `run`. + +So here it is: + +```go +import "github.com/posener/complete" + +func main() { + + // create a Command object, that represents the command we want + // to complete. + run := complete.Command{ + + // Sub defines a list of sub commands of the program, + // this is recursive, since every command is of type command also. + Sub: complete.Commands{ + + // add a build sub command + "build": complete.Command { + + // define flags of the build sub command + Flags: complete.Flags{ + // build sub command has a flag '-cpus', which + // expects number of cpus after it. in that case + // anything could complete this flag. + "-cpus": complete.PredictAnything, + }, + }, + }, + + // define flags of the 'run' main command + Flags: complete.Flags{ + // a flag -o, which expects a file ending with .out after + // it, the tab completion will auto complete for files matching + // the given pattern. + "-o": complete.PredictFiles("*.out"), + }, + + // define global flags of the 'run' main command + // those will show up also when a sub command was entered in the + // command line + GlobalFlags: complete.Flags{ + + // a flag '-h' which does not expects anything after it + "-h": complete.PredictNothing, + }, + } + + // run the command completion, as part of the main() function. + // this triggers the autocompletion when needed. 
+ // name must be exactly as the binary that we want to complete. + complete.New("run", run).Run() +} +``` + +#### Self completing program + +In case that the program that we want to complete is written in go we +can make it self completing. +Here is an example: [./example/self/main.go](./example/self/main.go) . + +## Sub Packages + +* [cmd](./cmd): Package cmd used for command line options for the complete tool + +* [gocomplete](./gocomplete): Package main is complete tool for the go command line + +* [match](./match): Package match contains matchers that decide if to apply completion. + + +--- + +Created by [goreadme](https://github.com/apps/goreadme) diff --git a/vendor/github.com/posener/complete/args.go b/vendor/github.com/posener/complete/args.go new file mode 100644 index 000000000..3340285e1 --- /dev/null +++ b/vendor/github.com/posener/complete/args.go @@ -0,0 +1,114 @@ +package complete + +import ( + "os" + "path/filepath" + "strings" + "unicode" +) + +// Args describes command line arguments +type Args struct { + // All lists of all arguments in command line (not including the command itself) + All []string + // Completed lists of all completed arguments in command line, + // If the last one is still being typed - no space after it, + // it won't appear in this list of arguments. + Completed []string + // Last argument in command line, the one being typed, if the last + // character in the command line is a space, this argument will be empty, + // otherwise this would be the last word. + Last string + // LastCompleted is the last argument that was fully typed. + // If the last character in the command line is space, this would be the + // last word, otherwise, it would be the word before that. + LastCompleted string +} + +// Directory gives the directory of the current written +// last argument if it represents a file name being written. +// in case that it is not, we fall back to the current directory. +// +// Deprecated. 
+func (a Args) Directory() string { + if info, err := os.Stat(a.Last); err == nil && info.IsDir() { + return fixPathForm(a.Last, a.Last) + } + dir := filepath.Dir(a.Last) + if info, err := os.Stat(dir); err != nil || !info.IsDir() { + return "./" + } + return fixPathForm(a.Last, dir) +} + +func newArgs(line string) Args { + var ( + all []string + completed []string + ) + parts := splitFields(line) + if len(parts) > 0 { + all = parts[1:] + completed = removeLast(parts[1:]) + } + return Args{ + All: all, + Completed: completed, + Last: last(parts), + LastCompleted: last(completed), + } +} + +// splitFields returns a list of fields from the given command line. +// If the last character is space, it appends an empty field in the end +// indicating that the field before it was completed. +// If the last field is of the form "a=b", it splits it to two fields: "a", "b", +// So it can be completed. +func splitFields(line string) []string { + parts := strings.Fields(line) + + // Add empty field if the last field was completed. + if len(line) > 0 && unicode.IsSpace(rune(line[len(line)-1])) { + parts = append(parts, "") + } + + // Treat the last field if it is of the form "a=b" + parts = splitLastEqual(parts) + return parts +} + +func splitLastEqual(line []string) []string { + if len(line) == 0 { + return line + } + parts := strings.Split(line[len(line)-1], "=") + return append(line[:len(line)-1], parts...) +} + +// from returns a copy of Args of all arguments after the i'th argument. 
+func (a Args) from(i int) Args { + if i >= len(a.All) { + i = len(a.All) - 1 + } + a.All = a.All[i+1:] + + if i >= len(a.Completed) { + i = len(a.Completed) - 1 + } + a.Completed = a.Completed[i+1:] + return a +} + +func removeLast(a []string) []string { + if len(a) > 0 { + return a[:len(a)-1] + } + return a +} + +func last(args []string) string { + if len(args) == 0 { + return "" + } + return args[len(args)-1] +} diff --git a/vendor/github.com/posener/complete/cmd/cmd.go b/vendor/github.com/posener/complete/cmd/cmd.go new file mode 100644 index 000000000..b99fe5290 --- /dev/null +++ b/vendor/github.com/posener/complete/cmd/cmd.go @@ -0,0 +1,128 @@ +// Package cmd used for command line options for the complete tool +package cmd + +import ( + "errors" + "flag" + "fmt" + "os" + "strings" + + "github.com/posener/complete/cmd/install" +) + +// CLI for command line +type CLI struct { + Name string + InstallName string + UninstallName string + + install bool + uninstall bool + yes bool +} + +const ( + defaultInstallName = "install" + defaultUninstallName = "uninstall" +) + +// Run is used when running complete in command line mode. +// this is used when the complete is not completing words, but to +// install it or uninstall it. +func (f *CLI) Run() bool { + err := f.validate() + if err != nil { + os.Stderr.WriteString(err.Error() + "\n") + os.Exit(1) + } + + switch { + case f.install: + f.prompt() + err = install.Install(f.Name) + case f.uninstall: + f.prompt() + err = install.Uninstall(f.Name) + default: + // non of the action flags matched, + // returning false should make the real program execute + return false + } + + if err != nil { + fmt.Printf("%s failed! %s\n", f.action(), err) + os.Exit(3) + } + fmt.Println("Done!") + return true +} + +// prompt use for approval +// exit if approval was not given +func (f *CLI) prompt() { + defer fmt.Println(f.action() + "ing...") + if f.yes { + return + } + fmt.Printf("%s completion for %s? 
", f.action(), f.Name) + var answer string + fmt.Scanln(&answer) + + switch strings.ToLower(answer) { + case "y", "yes": + return + default: + fmt.Println("Cancelling...") + os.Exit(1) + } +} + +// AddFlags adds the CLI flags to the flag set. +// If flags is nil, the default command line flags will be taken. +// Pass non-empty strings as installName and uninstallName to override the default +// flag names. +func (f *CLI) AddFlags(flags *flag.FlagSet) { + if flags == nil { + flags = flag.CommandLine + } + + if f.InstallName == "" { + f.InstallName = defaultInstallName + } + if f.UninstallName == "" { + f.UninstallName = defaultUninstallName + } + + if flags.Lookup(f.InstallName) == nil { + flags.BoolVar(&f.install, f.InstallName, false, + fmt.Sprintf("Install completion for %s command", f.Name)) + } + if flags.Lookup(f.UninstallName) == nil { + flags.BoolVar(&f.uninstall, f.UninstallName, false, + fmt.Sprintf("Uninstall completion for %s command", f.Name)) + } + if flags.Lookup("y") == nil { + flags.BoolVar(&f.yes, "y", false, "Don't prompt user for typing 'yes' when installing completion") + } +} + +// validate the CLI +func (f *CLI) validate() error { + if f.install && f.uninstall { + return errors.New("Install and uninstall are mutually exclusive") + } + return nil +} + +// action name according to the CLI values. 
+func (f *CLI) action() string { + switch { + case f.install: + return "Install" + case f.uninstall: + return "Uninstall" + default: + return "unknown" + } +} diff --git a/vendor/github.com/posener/complete/cmd/install/bash.go b/vendor/github.com/posener/complete/cmd/install/bash.go new file mode 100644 index 000000000..17c64de13 --- /dev/null +++ b/vendor/github.com/posener/complete/cmd/install/bash.go @@ -0,0 +1,37 @@ +package install + +import "fmt" + +// (un)install in bash +// basically adds/remove from .bashrc: +// +// complete -C +type bash struct { + rc string +} + +func (b bash) IsInstalled(cmd, bin string) bool { + completeCmd := b.cmd(cmd, bin) + return lineInFile(b.rc, completeCmd) +} + +func (b bash) Install(cmd, bin string) error { + if b.IsInstalled(cmd, bin) { + return fmt.Errorf("already installed in %s", b.rc) + } + completeCmd := b.cmd(cmd, bin) + return appendToFile(b.rc, completeCmd) +} + +func (b bash) Uninstall(cmd, bin string) error { + if !b.IsInstalled(cmd, bin) { + return fmt.Errorf("does not installed in %s", b.rc) + } + + completeCmd := b.cmd(cmd, bin) + return removeFromFile(b.rc, completeCmd) +} + +func (bash) cmd(cmd, bin string) string { + return fmt.Sprintf("complete -C %s %s", bin, cmd) +} diff --git a/vendor/github.com/posener/complete/cmd/install/fish.go b/vendor/github.com/posener/complete/cmd/install/fish.go new file mode 100644 index 000000000..2b64bfc83 --- /dev/null +++ b/vendor/github.com/posener/complete/cmd/install/fish.go @@ -0,0 +1,69 @@ +package install + +import ( + "bytes" + "fmt" + "os" + "path/filepath" + "text/template" +) + +// (un)install in fish + +type fish struct { + configDir string +} + +func (f fish) IsInstalled(cmd, bin string) bool { + completionFile := f.getCompletionFilePath(cmd) + if _, err := os.Stat(completionFile); err == nil { + return true + } + return false +} + +func (f fish) Install(cmd, bin string) error { + if f.IsInstalled(cmd, bin) { + return fmt.Errorf("already installed at %s", 
f.getCompletionFilePath(cmd)) + } + + completionFile := f.getCompletionFilePath(cmd) + completeCmd, err := f.cmd(cmd, bin) + if err != nil { + return err + } + + return createFile(completionFile, completeCmd) +} + +func (f fish) Uninstall(cmd, bin string) error { + if !f.IsInstalled(cmd, bin) { + return fmt.Errorf("does not installed in %s", f.configDir) + } + + completionFile := f.getCompletionFilePath(cmd) + return os.Remove(completionFile) +} + +func (f fish) getCompletionFilePath(cmd string) string { + return filepath.Join(f.configDir, "completions", fmt.Sprintf("%s.fish", cmd)) +} + +func (f fish) cmd(cmd, bin string) (string, error) { + var buf bytes.Buffer + params := struct{ Cmd, Bin string }{cmd, bin} + tmpl := template.Must(template.New("cmd").Parse(` +function __complete_{{.Cmd}} + set -lx COMP_LINE (commandline -cp) + test -z (commandline -ct) + and set COMP_LINE "$COMP_LINE " + {{.Bin}} +end +complete -f -c {{.Cmd}} -a "(__complete_{{.Cmd}})" +`)) + err := tmpl.Execute(&buf, params) + if err != nil { + return "", err + } + return buf.String(), nil +} diff --git a/vendor/github.com/posener/complete/cmd/install/install.go b/vendor/github.com/posener/complete/cmd/install/install.go new file mode 100644 index 000000000..884c23f5b --- /dev/null +++ b/vendor/github.com/posener/complete/cmd/install/install.go @@ -0,0 +1,148 @@ +package install + +import ( + "errors" + "os" + "os/user" + "path/filepath" + "runtime" + + "github.com/hashicorp/go-multierror" +) + +type installer interface { + IsInstalled(cmd, bin string) bool + Install(cmd, bin string) error + Uninstall(cmd, bin string) error +} + +// Install complete command given: +// cmd: is the command name +func Install(cmd string) error { + is := installers() + if len(is) == 0 { + return errors.New("Did not find any shells to install") + } + bin, err := getBinaryPath() + if err != nil { + return err + } + + for _, i := range is { + errI := i.Install(cmd, bin) + if errI != nil { + err = 
multierror.Append(err, errI) + } + } + + return err +} + +// IsInstalled returns true if the completion +// for the given cmd is installed. +func IsInstalled(cmd string) bool { + bin, err := getBinaryPath() + if err != nil { + return false + } + + for _, i := range installers() { + installed := i.IsInstalled(cmd, bin) + if installed { + return true + } + } + + return false +} + +// Uninstall complete command given: +// cmd: is the command name +func Uninstall(cmd string) error { + is := installers() + if len(is) == 0 { + return errors.New("Did not find any shells to uninstall") + } + bin, err := getBinaryPath() + if err != nil { + return err + } + + for _, i := range is { + errI := i.Uninstall(cmd, bin) + if errI != nil { + err = multierror.Append(err, errI) + } + } + + return err +} + +func installers() (i []installer) { + // The list of bash config files candidates where it is + // possible to install the completion command. + var bashConfFiles []string + switch runtime.GOOS { + case "darwin": + bashConfFiles = []string{".bash_profile"} + default: + bashConfFiles = []string{".bashrc", ".bash_profile", ".bash_login", ".profile"} + } + for _, rc := range bashConfFiles { + if f := rcFile(rc); f != "" { + i = append(i, bash{f}) + break + } + } + if f := rcFile(".zshrc"); f != "" { + i = append(i, zsh{f}) + } + if d := fishConfigDir(); d != "" { + i = append(i, fish{d}) + } + return +} + +func fishConfigDir() string { + configDir := filepath.Join(getConfigHomePath(), "fish") + if configDir == "" { + return "" + } + if info, err := os.Stat(configDir); err != nil || !info.IsDir() { + return "" + } + return configDir +} + +func getConfigHomePath() string { + u, err := user.Current() + if err != nil { + return "" + } + + configHome := os.Getenv("XDG_CONFIG_HOME") + if configHome == "" { + return filepath.Join(u.HomeDir, ".config") + } + return configHome +} + +func getBinaryPath() (string, error) { + bin, err := os.Executable() + if err != nil { + return "", err + } + 
return filepath.Abs(bin) +} + +func rcFile(name string) string { + u, err := user.Current() + if err != nil { + return "" + } + path := filepath.Join(u.HomeDir, name) + if _, err := os.Stat(path); err != nil { + return "" + } + return path +} diff --git a/vendor/github.com/posener/complete/cmd/install/utils.go b/vendor/github.com/posener/complete/cmd/install/utils.go new file mode 100644 index 000000000..d34ac8cae --- /dev/null +++ b/vendor/github.com/posener/complete/cmd/install/utils.go @@ -0,0 +1,140 @@ +package install + +import ( + "bufio" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" +) + +func lineInFile(name string, lookFor string) bool { + f, err := os.Open(name) + if err != nil { + return false + } + defer f.Close() + r := bufio.NewReader(f) + prefix := []byte{} + for { + line, isPrefix, err := r.ReadLine() + if err == io.EOF { + return false + } + if err != nil { + return false + } + if isPrefix { + prefix = append(prefix, line...) + continue + } + line = append(prefix, line...) 
+ if string(line) == lookFor { + return true + } + prefix = prefix[:0] + } +} + +func createFile(name string, content string) error { + // make sure file directory exists + if err := os.MkdirAll(filepath.Dir(name), 0775); err != nil { + return err + } + + // create the file + f, err := os.Create(name) + if err != nil { + return err + } + defer f.Close() + + // write file content + _, err = f.WriteString(fmt.Sprintf("%s\n", content)) + return err +} + +func appendToFile(name string, content string) error { + f, err := os.OpenFile(name, os.O_RDWR|os.O_APPEND, 0) + if err != nil { + return err + } + defer f.Close() + _, err = f.WriteString(fmt.Sprintf("\n%s\n", content)) + return err +} + +func removeFromFile(name string, content string) error { + backup := name + ".bck" + err := copyFile(name, backup) + if err != nil { + return err + } + temp, err := removeContentToTempFile(name, content) + if err != nil { + return err + } + + err = copyFile(temp, name) + if err != nil { + return err + } + + return os.Remove(backup) +} + +func removeContentToTempFile(name, content string) (string, error) { + rf, err := os.Open(name) + if err != nil { + return "", err + } + defer rf.Close() + wf, err := ioutil.TempFile("/tmp", "complete-") + if err != nil { + return "", err + } + defer wf.Close() + + r := bufio.NewReader(rf) + prefix := []byte{} + for { + line, isPrefix, err := r.ReadLine() + if err == io.EOF { + break + } + if err != nil { + return "", err + } + if isPrefix { + prefix = append(prefix, line...) + continue + } + line = append(prefix, line...) 
+ str := string(line) + if str == content { + continue + } + _, err = wf.WriteString(str + "\n") + if err != nil { + return "", err + } + prefix = prefix[:0] + } + return wf.Name(), nil +} + +func copyFile(src string, dst string) error { + in, err := os.Open(src) + if err != nil { + return err + } + defer in.Close() + out, err := os.Create(dst) + if err != nil { + return err + } + defer out.Close() + _, err = io.Copy(out, in) + return err +} diff --git a/vendor/github.com/posener/complete/cmd/install/zsh.go b/vendor/github.com/posener/complete/cmd/install/zsh.go new file mode 100644 index 000000000..29950ab17 --- /dev/null +++ b/vendor/github.com/posener/complete/cmd/install/zsh.go @@ -0,0 +1,44 @@ +package install + +import "fmt" + +// (un)install in zsh +// basically adds/remove from .zshrc: +// +// autoload -U +X bashcompinit && bashcompinit" +// complete -C +type zsh struct { + rc string +} + +func (z zsh) IsInstalled(cmd, bin string) bool { + completeCmd := z.cmd(cmd, bin) + return lineInFile(z.rc, completeCmd) +} + +func (z zsh) Install(cmd, bin string) error { + if z.IsInstalled(cmd, bin) { + return fmt.Errorf("already installed in %s", z.rc) + } + + completeCmd := z.cmd(cmd, bin) + bashCompInit := "autoload -U +X bashcompinit && bashcompinit" + if !lineInFile(z.rc, bashCompInit) { + completeCmd = bashCompInit + "\n" + completeCmd + } + + return appendToFile(z.rc, completeCmd) +} + +func (z zsh) Uninstall(cmd, bin string) error { + if !z.IsInstalled(cmd, bin) { + return fmt.Errorf("does not installed in %s", z.rc) + } + + completeCmd := z.cmd(cmd, bin) + return removeFromFile(z.rc, completeCmd) +} + +func (zsh) cmd(cmd, bin string) string { + return fmt.Sprintf("complete -o nospace -C %s %s", bin, cmd) +} diff --git a/vendor/github.com/posener/complete/command.go b/vendor/github.com/posener/complete/command.go new file mode 100644 index 000000000..82d37d529 --- /dev/null +++ b/vendor/github.com/posener/complete/command.go @@ -0,0 +1,111 @@ +package complete 
+ +// Command represents a command line +// It holds the data that enables auto completion of command line +// Command can also be a sub command. +type Command struct { + // Sub is map of sub commands of the current command + // The key refer to the sub command name, and the value is it's + // Command descriptive struct. + Sub Commands + + // Flags is a map of flags that the command accepts. + // The key is the flag name, and the value is it's predictions. + Flags Flags + + // GlobalFlags is a map of flags that the command accepts. + // Global flags that can appear also after a sub command. + GlobalFlags Flags + + // Args are extra arguments that the command accepts, those who are + // given without any flag before. + Args Predictor +} + +// Predict returns all possible predictions for args according to the command struct +func (c *Command) Predict(a Args) []string { + options, _ := c.predict(a) + return options +} + +// Commands is the type of Sub member, it maps a command name to a command struct +type Commands map[string]Command + +// Predict completion of sub command names names according to command line arguments +func (c Commands) Predict(a Args) (prediction []string) { + for sub := range c { + prediction = append(prediction, sub) + } + return +} + +// Flags is the type Flags of the Flags member, it maps a flag name to the flag predictions. +type Flags map[string]Predictor + +// Predict completion of flags names according to command line arguments +func (f Flags) Predict(a Args) (prediction []string) { + for flag := range f { + // If the flag starts with a hyphen, we avoid emitting the prediction + // unless the last typed arg contains a hyphen as well. 
+ flagHyphenStart := len(flag) != 0 && flag[0] == '-' + lastHyphenStart := len(a.Last) != 0 && a.Last[0] == '-' + if flagHyphenStart && !lastHyphenStart { + continue + } + prediction = append(prediction, flag) + } + return +} + +// predict options +// only is set to true if no more options are allowed to be returned +// those are in cases of special flag that has specific completion arguments, +// and other flags or sub commands can't come after it. +func (c *Command) predict(a Args) (options []string, only bool) { + + // search sub commands for predictions first + subCommandFound := false + for i, arg := range a.Completed { + if cmd, ok := c.Sub[arg]; ok { + subCommandFound = true + + // recursive call for sub command + options, only = cmd.predict(a.from(i)) + if only { + return + } + + // We matched so stop searching. Continuing to search can accidentally + // match a subcommand with current set of commands, see issue #46. + break + } + } + + // if last completed word is a global flag that we need to complete + if predictor, ok := c.GlobalFlags[a.LastCompleted]; ok && predictor != nil { + Log("Predicting according to global flag %s", a.LastCompleted) + return predictor.Predict(a), true + } + + options = append(options, c.GlobalFlags.Predict(a)...) + + // if a sub command was entered, we won't add the parent command + // completions and we return here. + if subCommandFound { + return + } + + // if last completed word is a command flag that we need to complete + if predictor, ok := c.Flags[a.LastCompleted]; ok && predictor != nil { + Log("Predicting according to flag %s", a.LastCompleted) + return predictor.Predict(a), true + } + + options = append(options, c.Sub.Predict(a)...) + options = append(options, c.Flags.Predict(a)...) + if c.Args != nil { + options = append(options, c.Args.Predict(a)...) 
+ } + + return +} diff --git a/vendor/github.com/posener/complete/complete.go b/vendor/github.com/posener/complete/complete.go new file mode 100644 index 000000000..423cbec6c --- /dev/null +++ b/vendor/github.com/posener/complete/complete.go @@ -0,0 +1,104 @@ +package complete + +import ( + "flag" + "fmt" + "io" + "os" + "strconv" + "strings" + + "github.com/posener/complete/cmd" +) + +const ( + envLine = "COMP_LINE" + envPoint = "COMP_POINT" + envDebug = "COMP_DEBUG" +) + +// Complete structs define completion for a command with CLI options +type Complete struct { + Command Command + cmd.CLI + Out io.Writer +} + +// New creates a new complete command. +// name is the name of command we want to auto complete. +// IMPORTANT: it must be the same name - if the auto complete +// completes the 'go' command, name must be equal to "go". +// command is the struct of the command completion. +func New(name string, command Command) *Complete { + return &Complete{ + Command: command, + CLI: cmd.CLI{Name: name}, + Out: os.Stdout, + } +} + +// Run runs the completion and add installation flags beforehand. +// The flags are added to the main flag CommandLine variable. +func (c *Complete) Run() bool { + c.AddFlags(nil) + flag.Parse() + return c.Complete() +} + +// Complete a command from completion line in environment variable, +// and print out the complete options. +// returns success if the completion ran or if the cli matched +// any of the given flags, false otherwise +// For installation: it assumes that flags were added and parsed before +// it was called. 
+func (c *Complete) Complete() bool { + line, point, ok := getEnv() + if !ok { + // make sure flags parsed, + // in case they were not added in the main program + return c.CLI.Run() + } + + if point >= 0 && point < len(line) { + line = line[:point] + } + + Log("Completing phrase: %s", line) + a := newArgs(line) + Log("Completing last field: %s", a.Last) + options := c.Command.Predict(a) + Log("Options: %s", options) + + // filter only options that match the last argument + matches := []string{} + for _, option := range options { + if strings.HasPrefix(option, a.Last) { + matches = append(matches, option) + } + } + Log("Matches: %s", matches) + c.output(matches) + return true +} + +func getEnv() (line string, point int, ok bool) { + line = os.Getenv(envLine) + if line == "" { + return + } + point, err := strconv.Atoi(os.Getenv(envPoint)) + if err != nil { + // If failed parsing point for some reason, set it to point + // on the end of the line. + Log("Failed parsing point %s: %v", os.Getenv(envPoint), err) + point = len(line) + } + return line, point, true +} + +func (c *Complete) output(options []string) { + // stdout of program defines the complete options + for _, option := range options { + fmt.Fprintln(c.Out, option) + } +} diff --git a/vendor/github.com/posener/complete/doc.go b/vendor/github.com/posener/complete/doc.go new file mode 100644 index 000000000..0ae09a1b7 --- /dev/null +++ b/vendor/github.com/posener/complete/doc.go @@ -0,0 +1,110 @@ +/* +Package complete provides a tool for bash writing bash completion in go, and bash completion for the go command line. + +Writing bash completion scripts is a hard work. This package provides an easy way +to create bash completion scripts for any command, and also an easy way to install/uninstall +the completion of the command. + +Go Command Bash Completion + +In ./cmd/gocomplete there is an example for bash completion for the `go` command line. 
+ +This is an example that uses the `complete` package on the `go` command - the `complete` package +can also be used to implement any completions, see #usage. + +Install + +1. Type in your shell: + + go get -u github.com/posener/complete/gocomplete + gocomplete -install + +2. Restart your shell + +Uninstall by `gocomplete -uninstall` + +Features + +- Complete `go` command, including sub commands and all flags. +- Complete packages names or `.go` files when necessary. +- Complete test names after `-run` flag. + +Complete package + +Supported shells: + +- [x] bash +- [x] zsh +- [x] fish + +Usage + +Assuming you have program called `run` and you want to have bash completion +for it, meaning, if you type `run` then space, then press the `Tab` key, +the shell will suggest relevant complete options. + +In that case, we will create a program called `runcomplete`, a go program, +with a `func main()` and so, that will make the completion of the `run` +program. Once the `runcomplete` will be in a binary form, we could +`runcomplete -install` and that will add to our shell all the bash completion +options for `run`. + +So here it is: + + import "github.com/posener/complete" + + func main() { + + // create a Command object, that represents the command we want + // to complete. + run := complete.Command{ + + // Sub defines a list of sub commands of the program, + // this is recursive, since every command is of type command also. + Sub: complete.Commands{ + + // add a build sub command + "build": complete.Command { + + // define flags of the build sub command + Flags: complete.Flags{ + // build sub command has a flag '-cpus', which + // expects number of cpus after it. in that case + // anything could complete this flag. 
+ "-cpus": complete.PredictAnything, + }, + }, + }, + + // define flags of the 'run' main command + Flags: complete.Flags{ + // a flag -o, which expects a file ending with .out after + // it, the tab completion will auto complete for files matching + // the given pattern. + "-o": complete.PredictFiles("*.out"), + }, + + // define global flags of the 'run' main command + // those will show up also when a sub command was entered in the + // command line + GlobalFlags: complete.Flags{ + + // a flag '-h' which does not expects anything after it + "-h": complete.PredictNothing, + }, + } + + // run the command completion, as part of the main() function. + // this triggers the autocompletion when needed. + // name must be exactly as the binary that we want to complete. + complete.New("run", run).Run() + } + +Self completing program + +In case that the program that we want to complete is written in go we +can make it self completing. +Here is an example: ./example/self/main.go . + +*/ +package complete diff --git a/vendor/github.com/posener/complete/goreadme.json b/vendor/github.com/posener/complete/goreadme.json new file mode 100644 index 000000000..025ec76c9 --- /dev/null +++ b/vendor/github.com/posener/complete/goreadme.json @@ -0,0 +1,9 @@ +{ + "badges": { + "travis_ci": true, + "code_cov": true, + "golang_ci": true, + "go_doc": true, + "goreadme": true + } +} \ No newline at end of file diff --git a/vendor/github.com/posener/complete/log.go b/vendor/github.com/posener/complete/log.go new file mode 100644 index 000000000..c3029556e --- /dev/null +++ b/vendor/github.com/posener/complete/log.go @@ -0,0 +1,22 @@ +package complete + +import ( + "io/ioutil" + "log" + "os" +) + +// Log is used for debugging purposes +// since complete is running on tab completion, it is nice to +// have logs to the stderr (when writing your own completer) +// to write logs, set the COMP_DEBUG environment variable and +// use complete.Log in the complete program +var Log = getLogger() + +func 
getLogger() func(format string, args ...interface{}) { + var logfile = ioutil.Discard + if os.Getenv(envDebug) != "" { + logfile = os.Stderr + } + return log.New(logfile, "complete ", log.Flags()).Printf +} diff --git a/vendor/github.com/posener/complete/predict.go b/vendor/github.com/posener/complete/predict.go new file mode 100644 index 000000000..820706325 --- /dev/null +++ b/vendor/github.com/posener/complete/predict.go @@ -0,0 +1,41 @@ +package complete + +// Predictor implements a predict method, in which given +// command line arguments returns a list of options it predicts. +type Predictor interface { + Predict(Args) []string +} + +// PredictOr unions two predicate functions, so that the result predicate +// returns the union of their predication +func PredictOr(predictors ...Predictor) Predictor { + return PredictFunc(func(a Args) (prediction []string) { + for _, p := range predictors { + if p == nil { + continue + } + prediction = append(prediction, p.Predict(a)...) + } + return + }) +} + +// PredictFunc determines what terms can follow a command or a flag +// It is used for auto completion, given last - the last word in the already +// in the command line, what words can complete it. +type PredictFunc func(Args) []string + +// Predict invokes the predict function and implements the Predictor interface +func (p PredictFunc) Predict(a Args) []string { + if p == nil { + return nil + } + return p(a) +} + +// PredictNothing does not expect anything after. +var PredictNothing Predictor + +// PredictAnything expects something, but nothing particular, such as a number +// or arbitrary name. 
+var PredictAnything = PredictFunc(func(Args) []string { return nil }) diff --git a/vendor/github.com/posener/complete/predict_files.go b/vendor/github.com/posener/complete/predict_files.go new file mode 100644 index 000000000..25ae2d514 --- /dev/null +++ b/vendor/github.com/posener/complete/predict_files.go @@ -0,0 +1,174 @@ +package complete + +import ( + "io/ioutil" + "os" + "path/filepath" + "strings" +) + +// PredictDirs will search for directories in the given started to be typed +// path, if no path was started to be typed, it will complete to directories +// in the current working directory. +func PredictDirs(pattern string) Predictor { + return files(pattern, false) +} + +// PredictFiles will search for files matching the given pattern in the started to +// be typed path, if no path was started to be typed, it will complete to files that +// match the pattern in the current working directory. +// To match any file, use "*" as pattern. To match go files use "*.go", and so on. +func PredictFiles(pattern string) Predictor { + return files(pattern, true) +} + +func files(pattern string, allowFiles bool) PredictFunc { + + // search for files according to arguments, + // if only one directory has matched the result, search recursively into + // this directory to give more results. + return func(a Args) (prediction []string) { + prediction = predictFiles(a, pattern, allowFiles) + + // if the number of prediction is not 1, we either have many results or + // have no results, so we return it. 
+ if len(prediction) != 1 { + return + } + + // only try deeper, if the one item is a directory + if stat, err := os.Stat(prediction[0]); err != nil || !stat.IsDir() { + return + } + + a.Last = prediction[0] + return predictFiles(a, pattern, allowFiles) + } +} + +func predictFiles(a Args, pattern string, allowFiles bool) []string { + if strings.HasSuffix(a.Last, "/..") { + return nil + } + + dir := directory(a.Last) + files := listFiles(dir, pattern, allowFiles) + + // add dir if match + files = append(files, dir) + + return PredictFilesSet(files).Predict(a) +} + +// directory gives the directory of the given partial path +// in case that it is not, we fall back to the current directory. +func directory(path string) string { + if info, err := os.Stat(path); err == nil && info.IsDir() { + return fixPathForm(path, path) + } + dir := filepath.Dir(path) + if info, err := os.Stat(dir); err == nil && info.IsDir() { + return fixPathForm(path, dir) + } + return "./" +} + +// PredictFilesSet predict according to file rules to a given set of file names +func PredictFilesSet(files []string) PredictFunc { + return func(a Args) (prediction []string) { + // add all matching files to prediction + for _, f := range files { + f = fixPathForm(a.Last, f) + + // test matching of file to the argument + if matchFile(f, a.Last) { + prediction = append(prediction, f) + } + } + return + } +} + +func listFiles(dir, pattern string, allowFiles bool) []string { + // set of all file names + m := map[string]bool{} + + // list files + if files, err := filepath.Glob(filepath.Join(dir, pattern)); err == nil { + for _, f := range files { + if stat, err := os.Stat(f); err != nil || stat.IsDir() || allowFiles { + m[f] = true + } + } + } + + // list directories + if dirs, err := ioutil.ReadDir(dir); err == nil { + for _, d := range dirs { + if d.IsDir() { + m[filepath.Join(dir, d.Name())] = true + } + } + } + + list := make([]string, 0, len(m)) + for k := range m { + list = append(list, k) + } + return 
list +} + +// MatchFile returns true if prefix can match the file +func matchFile(file, prefix string) bool { + // special case for current directory completion + if file == "./" && (prefix == "." || prefix == "") { + return true + } + if prefix == "." && strings.HasPrefix(file, ".") { + return true + } + + file = strings.TrimPrefix(file, "./") + prefix = strings.TrimPrefix(prefix, "./") + + return strings.HasPrefix(file, prefix) +} + +// fixPathForm changes a file name to a relative name +func fixPathForm(last string, file string) string { + // get wording directory for relative name + workDir, err := os.Getwd() + if err != nil { + return file + } + + abs, err := filepath.Abs(file) + if err != nil { + return file + } + + // if last is absolute, return path as absolute + if filepath.IsAbs(last) { + return fixDirPath(abs) + } + + rel, err := filepath.Rel(workDir, abs) + if err != nil { + return file + } + + // fix ./ prefix of path + if rel != "." && strings.HasPrefix(last, ".") { + rel = "./" + rel + } + + return fixDirPath(rel) +} + +func fixDirPath(path string) string { + info, err := os.Stat(path) + if err == nil && info.IsDir() && !strings.HasSuffix(path, "/") { + path += "/" + } + return path +} diff --git a/vendor/github.com/posener/complete/predict_set.go b/vendor/github.com/posener/complete/predict_set.go new file mode 100644 index 000000000..fa4a34ae4 --- /dev/null +++ b/vendor/github.com/posener/complete/predict_set.go @@ -0,0 +1,12 @@ +package complete + +// PredictSet expects specific set of terms, given in the options argument. 
+func PredictSet(options ...string) Predictor { + return predictSet(options) +} + +type predictSet []string + +func (p predictSet) Predict(a Args) []string { + return p +} diff --git a/vendor/github.com/russross/blackfriday/.gitignore b/vendor/github.com/russross/blackfriday/.gitignore new file mode 100644 index 000000000..75623dccc --- /dev/null +++ b/vendor/github.com/russross/blackfriday/.gitignore @@ -0,0 +1,8 @@ +*.out +*.swp +*.8 +*.6 +_obj +_test* +markdown +tags diff --git a/vendor/github.com/russross/blackfriday/.travis.yml b/vendor/github.com/russross/blackfriday/.travis.yml new file mode 100644 index 000000000..a49fff15a --- /dev/null +++ b/vendor/github.com/russross/blackfriday/.travis.yml @@ -0,0 +1,18 @@ +sudo: false +language: go +go: + - "1.9.x" + - "1.10.x" + - "1.11.x" + - tip +matrix: + fast_finish: true + allow_failures: + - go: tip +install: + - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step). +script: + - go get -t -v ./... + - diff -u <(echo -n) <(gofmt -d -s .) + - go tool vet . + - go test -v -race ./... diff --git a/vendor/github.com/russross/blackfriday/LICENSE.txt b/vendor/github.com/russross/blackfriday/LICENSE.txt new file mode 100644 index 000000000..7fbb253a8 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/LICENSE.txt @@ -0,0 +1,28 @@ +Blackfriday is distributed under the Simplified BSD License: + +Copyright © 2011 Russ Ross +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. +2. 
Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided with + the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/russross/blackfriday/README.md b/vendor/github.com/russross/blackfriday/README.md new file mode 100644 index 000000000..997ef5d42 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/README.md @@ -0,0 +1,364 @@ +Blackfriday +[![Build Status][BuildV2SVG]][BuildV2URL] +[![PkgGoDev][PkgGoDevV2SVG]][PkgGoDevV2URL] +=========== + +Blackfriday is a [Markdown][1] processor implemented in [Go][2]. It +is paranoid about its input (so you can safely feed it user-supplied +data), it is fast, it supports common extensions (tables, smart +punctuation substitutions, etc.), and it is safe for all utf-8 +(unicode) input. + +HTML output is currently supported, along with Smartypants +extensions. + +It started as a translation from C of [Sundown][3]. + + +Installation +------------ + +Blackfriday is compatible with modern Go releases in module mode. 
+With Go installed: + + go get github.com/russross/blackfriday + +will resolve and add the package to the current development module, +then build and install it. Alternatively, you can achieve the same +if you import it in a package: + + import "github.com/russross/blackfriday" + +and `go get` without parameters. + +Old versions of Go and legacy GOPATH mode might work, +but no effort is made to keep them working. + + +Versions +-------- + +Currently maintained and recommended version of Blackfriday is `v2`. It's being +developed on its own branch: https://github.com/russross/blackfriday/tree/v2 and the +documentation is available at +https://pkg.go.dev/github.com/russross/blackfriday/v2. + +It is `go get`-able in module mode at `github.com/russross/blackfriday/v2`. + +Version 2 offers a number of improvements over v1: + +* Cleaned up API +* A separate call to [`Parse`][4], which produces an abstract syntax tree for + the document +* Latest bug fixes +* Flexibility to easily add your own rendering extensions + +Potential drawbacks: + +* Our benchmarks show v2 to be slightly slower than v1. Currently in the + ballpark of around 15%. +* API breakage. If you can't afford modifying your code to adhere to the new API + and don't care too much about the new features, v2 is probably not for you. +* Several bug fixes are trailing behind and still need to be forward-ported to + v2. See issue [#348](https://github.com/russross/blackfriday/issues/348) for + tracking. + +If you are still interested in the legacy `v1`, you can import it from +`github.com/russross/blackfriday`. Documentation for the legacy v1 can be found +here: https://pkg.go.dev/github.com/russross/blackfriday. + + +Usage +----- + +### v1 + +For basic usage, it is as simple as getting your input into a byte +slice and calling: + +```go +output := blackfriday.MarkdownBasic(input) +``` + +This renders it with no extensions enabled. 
To get a more useful +feature set, use this instead: + +```go +output := blackfriday.MarkdownCommon(input) +``` + +### v2 + +For the most sensible markdown processing, it is as simple as getting your input +into a byte slice and calling: + +```go +output := blackfriday.Run(input) +``` + +Your input will be parsed and the output rendered with a set of most popular +extensions enabled. If you want the most basic feature set, corresponding with +the bare Markdown specification, use: + +```go +output := blackfriday.Run(input, blackfriday.WithNoExtensions()) +``` + +### Sanitize untrusted content + +Blackfriday itself does nothing to protect against malicious content. If you are +dealing with user-supplied markdown, we recommend running Blackfriday's output +through HTML sanitizer such as [Bluemonday][5]. + +Here's an example of simple usage of Blackfriday together with Bluemonday: + +```go +import ( + "github.com/microcosm-cc/bluemonday" + "github.com/russross/blackfriday" +) + +// ... +unsafe := blackfriday.Run(input) +html := bluemonday.UGCPolicy().SanitizeBytes(unsafe) +``` + +### Custom options, v1 + +If you want to customize the set of options, first get a renderer +(currently only the HTML output engine), then use it to +call the more general `Markdown` function. For examples, see the +implementations of `MarkdownBasic` and `MarkdownCommon` in +`markdown.go`. + +### Custom options, v2 + +If you want to customize the set of options, use `blackfriday.WithExtensions`, +`blackfriday.WithRenderer` and `blackfriday.WithRefOverride`. + +### `blackfriday-tool` + +You can also check out `blackfriday-tool` for a more complete example +of how to use it. Download and install it using: + + go get github.com/russross/blackfriday-tool + +This is a simple command-line tool that allows you to process a +markdown file using a standalone program. 
You can also browse the +source directly on github if you are just looking for some example +code: + +* + +Note that if you have not already done so, installing +`blackfriday-tool` will be sufficient to download and install +blackfriday in addition to the tool itself. The tool binary will be +installed in `$GOPATH/bin`. This is a statically-linked binary that +can be copied to wherever you need it without worrying about +dependencies and library versions. + +### Sanitized anchor names + +Blackfriday includes an algorithm for creating sanitized anchor names +corresponding to a given input text. This algorithm is used to create +anchors for headings when `EXTENSION_AUTO_HEADER_IDS` is enabled. The +algorithm has a specification, so that other packages can create +compatible anchor names and links to those anchors. + +The specification is located at https://pkg.go.dev/github.com/russross/blackfriday#hdr-Sanitized_Anchor_Names. + +[`SanitizedAnchorName`](https://pkg.go.dev/github.com/russross/blackfriday#SanitizedAnchorName) exposes this functionality, and can be used to +create compatible links to the anchor names generated by blackfriday. +This algorithm is also implemented in a small standalone package at +[`github.com/shurcooL/sanitized_anchor_name`](https://pkg.go.dev/github.com/shurcooL/sanitized_anchor_name). It can be useful for clients +that want a small package and don't need full functionality of blackfriday. + + +Features +-------- + +All features of Sundown are supported, including: + +* **Compatibility**. The Markdown v1.0.3 test suite passes with + the `--tidy` option. Without `--tidy`, the differences are + mostly in whitespace and entity escaping, where blackfriday is + more consistent and cleaner. + +* **Common extensions**, including table support, fenced code + blocks, autolinks, strikethroughs, non-strict emphasis, etc. + +* **Safety**. 
Blackfriday is paranoid when parsing, making it safe + to feed untrusted user input without fear of bad things + happening. The test suite stress tests this and there are no + known inputs that make it crash. If you find one, please let me + know and send me the input that does it. + + NOTE: "safety" in this context means *runtime safety only*. In order to + protect yourself against JavaScript injection in untrusted content, see + [this example](https://github.com/russross/blackfriday#sanitize-untrusted-content). + +* **Fast processing**. It is fast enough to render on-demand in + most web applications without having to cache the output. + +* **Thread safety**. You can run multiple parsers in different + goroutines without ill effect. There is no dependence on global + shared state. + +* **Minimal dependencies**. Blackfriday only depends on standard + library packages in Go. The source code is pretty + self-contained, so it is easy to add to any project, including + Google App Engine projects. + +* **Standards compliant**. Output successfully validates using the + W3C validation tool for HTML 4.01 and XHTML 1.0 Transitional. + + +Extensions +---------- + +In addition to the standard markdown syntax, this package +implements the following extensions: + +* **Intra-word emphasis supression**. The `_` character is + commonly used inside words when discussing code, so having + markdown interpret it as an emphasis command is usually the + wrong thing. Blackfriday lets you treat all emphasis markers as + normal characters when they occur inside a word. + +* **Tables**. Tables can be created by drawing them in the input + using a simple syntax: + + ``` + Name | Age + --------|------ + Bob | 27 + Alice | 23 + ``` + +* **Fenced code blocks**. In addition to the normal 4-space + indentation to mark code blocks, you can explicitly mark them + and supply a language (to make syntax highlighting simple). 
Just + mark it like this: + + ```go + func getTrue() bool { + return true + } + ``` + + You can use 3 or more backticks to mark the beginning of the + block, and the same number to mark the end of the block. + + To preserve classes of fenced code blocks while using the bluemonday + HTML sanitizer, use the following policy: + + ```go + p := bluemonday.UGCPolicy() + p.AllowAttrs("class").Matching(regexp.MustCompile("^language-[a-zA-Z0-9]+$")).OnElements("code") + html := p.SanitizeBytes(unsafe) + ``` + +* **Definition lists**. A simple definition list is made of a single-line + term followed by a colon and the definition for that term. + + Cat + : Fluffy animal everyone likes + + Internet + : Vector of transmission for pictures of cats + + Terms must be separated from the previous definition by a blank line. + +* **Footnotes**. A marker in the text that will become a superscript number; + a footnote definition that will be placed in a list of footnotes at the + end of the document. A footnote looks like this: + + This is a footnote.[^1] + + [^1]: the footnote text. + +* **Autolinking**. Blackfriday can find URLs that have not been + explicitly marked as links and turn them into links. + +* **Strikethrough**. Use two tildes (`~~`) to mark text that + should be crossed out. + +* **Hard line breaks**. With this extension enabled (it is off by + default in the `MarkdownBasic` and `MarkdownCommon` convenience + functions), newlines in the input translate into line breaks in + the output. + +* **Smart quotes**. Smartypants-style punctuation substitution is + supported, turning normal double- and single-quote marks into + curly quotes, etc. + +* **LaTeX-style dash parsing** is an additional option, where `--` + is translated into `–`, and `---` is translated into + `—`. This differs from most smartypants processors, which + turn a single hyphen into an ndash and a double hyphen into an + mdash. 
+ +* **Smart fractions**, where anything that looks like a fraction + is translated into suitable HTML (instead of just a few special + cases like most smartypant processors). For example, `4/5` + becomes `45`, which renders as + 45. + + +Other renderers +--------------- + +Blackfriday is structured to allow alternative rendering engines. Here +are a few of note: + +* [github_flavored_markdown](https://pkg.go.dev/github.com/shurcooL/github_flavored_markdown): + provides a GitHub Flavored Markdown renderer with fenced code block + highlighting, clickable heading anchor links. + + It's not customizable, and its goal is to produce HTML output + equivalent to the [GitHub Markdown API endpoint](https://developer.github.com/v3/markdown/#render-a-markdown-document-in-raw-mode), + except the rendering is performed locally. + +* [markdownfmt](https://github.com/shurcooL/markdownfmt): like gofmt, + but for markdown. + +* [LaTeX output](https://gitlab.com/ambrevar/blackfriday-latex): + renders output as LaTeX. + +* [bfchroma](https://github.com/Depado/bfchroma/): provides convenience + integration with the [Chroma](https://github.com/alecthomas/chroma) code + highlighting library. bfchroma is only compatible with v2 of Blackfriday and + provides a drop-in renderer ready to use with Blackfriday, as well as + options and means for further customization. + +* [Blackfriday-Confluence](https://github.com/kentaro-m/blackfriday-confluence): provides a [Confluence Wiki Markup](https://confluence.atlassian.com/doc/confluence-wiki-markup-251003035.html) renderer. + +* [Blackfriday-Slack](https://github.com/karriereat/blackfriday-slack): converts markdown to slack message style + + +TODO +---- + +* More unit testing +* Improve Unicode support. It does not understand all Unicode + rules (about what constitutes a letter, a punctuation symbol, + etc.), so it may fail to detect word boundaries correctly in + some instances. It is safe on all UTF-8 input. 
+ + +License +------- + +[Blackfriday is distributed under the Simplified BSD License](LICENSE.txt) + + + [1]: https://daringfireball.net/projects/markdown/ "Markdown" + [2]: https://golang.org/ "Go Language" + [3]: https://github.com/vmg/sundown "Sundown" + [4]: https://pkg.go.dev/github.com/russross/blackfriday/v2#Parse "Parse func" + [5]: https://github.com/microcosm-cc/bluemonday "Bluemonday" + + [BuildV2SVG]: https://travis-ci.org/russross/blackfriday.svg?branch=v2 + [BuildV2URL]: https://travis-ci.org/russross/blackfriday + [PkgGoDevV2SVG]: https://pkg.go.dev/badge/github.com/russross/blackfriday/v2 + [PkgGoDevV2URL]: https://pkg.go.dev/github.com/russross/blackfriday/v2 diff --git a/vendor/github.com/russross/blackfriday/block.go b/vendor/github.com/russross/blackfriday/block.go new file mode 100644 index 000000000..563cb2903 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/block.go @@ -0,0 +1,1480 @@ +// +// Blackfriday Markdown Processor +// Available at http://github.com/russross/blackfriday +// +// Copyright © 2011 Russ Ross . +// Distributed under the Simplified BSD License. +// See README.md for details. +// + +// +// Functions to parse block-level elements. +// + +package blackfriday + +import ( + "bytes" + "strings" + "unicode" +) + +// Parse block-level data. +// Note: this function and many that it calls assume that +// the input buffer ends with a newline. +func (p *parser) block(out *bytes.Buffer, data []byte) { + if len(data) == 0 || data[len(data)-1] != '\n' { + panic("block input is missing terminating newline") + } + + // this is called recursively: enforce a maximum depth + if p.nesting >= p.maxNesting { + return + } + p.nesting++ + + // parse out one block-level construct at a time + for len(data) > 0 { + // prefixed header: + // + // # Header 1 + // ## Header 2 + // ... + // ###### Header 6 + if p.isPrefixHeader(data) { + data = data[p.prefixHeader(out, data):] + continue + } + + // block of preformatted HTML: + // + //
+ // ... + //
+ if data[0] == '<' { + if i := p.html(out, data, true); i > 0 { + data = data[i:] + continue + } + } + + // title block + // + // % stuff + // % more stuff + // % even more stuff + if p.flags&EXTENSION_TITLEBLOCK != 0 { + if data[0] == '%' { + if i := p.titleBlock(out, data, true); i > 0 { + data = data[i:] + continue + } + } + } + + // blank lines. note: returns the # of bytes to skip + if i := p.isEmpty(data); i > 0 { + data = data[i:] + continue + } + + // indented code block: + // + // func max(a, b int) int { + // if a > b { + // return a + // } + // return b + // } + if p.codePrefix(data) > 0 { + data = data[p.code(out, data):] + continue + } + + // fenced code block: + // + // ``` go info string here + // func fact(n int) int { + // if n <= 1 { + // return n + // } + // return n * fact(n-1) + // } + // ``` + if p.flags&EXTENSION_FENCED_CODE != 0 { + if i := p.fencedCodeBlock(out, data, true); i > 0 { + data = data[i:] + continue + } + } + + // horizontal rule: + // + // ------ + // or + // ****** + // or + // ______ + if p.isHRule(data) { + p.r.HRule(out) + var i int + for i = 0; data[i] != '\n'; i++ { + } + data = data[i:] + continue + } + + // block quote: + // + // > A big quote I found somewhere + // > on the web + if p.quotePrefix(data) > 0 { + data = data[p.quote(out, data):] + continue + } + + // table: + // + // Name | Age | Phone + // ------|-----|--------- + // Bob | 31 | 555-1234 + // Alice | 27 | 555-4321 + if p.flags&EXTENSION_TABLES != 0 { + if i := p.table(out, data); i > 0 { + data = data[i:] + continue + } + } + + // an itemized/unordered list: + // + // * Item 1 + // * Item 2 + // + // also works with + or - + if p.uliPrefix(data) > 0 { + data = data[p.list(out, data, 0):] + continue + } + + // a numbered/ordered list: + // + // 1. Item 1 + // 2. 
Item 2 + if p.oliPrefix(data) > 0 { + data = data[p.list(out, data, LIST_TYPE_ORDERED):] + continue + } + + // definition lists: + // + // Term 1 + // : Definition a + // : Definition b + // + // Term 2 + // : Definition c + if p.flags&EXTENSION_DEFINITION_LISTS != 0 { + if p.dliPrefix(data) > 0 { + data = data[p.list(out, data, LIST_TYPE_DEFINITION):] + continue + } + } + + // anything else must look like a normal paragraph + // note: this finds underlined headers, too + data = data[p.paragraph(out, data):] + } + + p.nesting-- +} + +func (p *parser) isPrefixHeader(data []byte) bool { + if data[0] != '#' { + return false + } + + if p.flags&EXTENSION_SPACE_HEADERS != 0 { + level := 0 + for level < 6 && data[level] == '#' { + level++ + } + if data[level] != ' ' { + return false + } + } + return true +} + +func (p *parser) prefixHeader(out *bytes.Buffer, data []byte) int { + level := 0 + for level < 6 && data[level] == '#' { + level++ + } + i := skipChar(data, level, ' ') + end := skipUntilChar(data, i, '\n') + skip := end + id := "" + if p.flags&EXTENSION_HEADER_IDS != 0 { + j, k := 0, 0 + // find start/end of header id + for j = i; j < end-1 && (data[j] != '{' || data[j+1] != '#'); j++ { + } + for k = j + 1; k < end && data[k] != '}'; k++ { + } + // extract header id iff found + if j < end && k < end { + id = string(data[j+2 : k]) + end = j + skip = k + 1 + for end > 0 && data[end-1] == ' ' { + end-- + } + } + } + for end > 0 && data[end-1] == '#' { + if isBackslashEscaped(data, end-1) { + break + } + end-- + } + for end > 0 && data[end-1] == ' ' { + end-- + } + if end > i { + if id == "" && p.flags&EXTENSION_AUTO_HEADER_IDS != 0 { + id = SanitizedAnchorName(string(data[i:end])) + } + work := func() bool { + p.inline(out, data[i:end]) + return true + } + p.r.Header(out, work, level, id) + } + return skip +} + +func (p *parser) isUnderlinedHeader(data []byte) int { + // test of level 1 header + if data[0] == '=' { + i := skipChar(data, 1, '=') + i = skipChar(data, i, 
' ') + if data[i] == '\n' { + return 1 + } else { + return 0 + } + } + + // test of level 2 header + if data[0] == '-' { + i := skipChar(data, 1, '-') + i = skipChar(data, i, ' ') + if data[i] == '\n' { + return 2 + } else { + return 0 + } + } + + return 0 +} + +func (p *parser) titleBlock(out *bytes.Buffer, data []byte, doRender bool) int { + if data[0] != '%' { + return 0 + } + splitData := bytes.Split(data, []byte("\n")) + var i int + for idx, b := range splitData { + if !bytes.HasPrefix(b, []byte("%")) { + i = idx // - 1 + break + } + } + + data = bytes.Join(splitData[0:i], []byte("\n")) + p.r.TitleBlock(out, data) + + return len(data) +} + +func (p *parser) html(out *bytes.Buffer, data []byte, doRender bool) int { + var i, j int + + // identify the opening tag + if data[0] != '<' { + return 0 + } + curtag, tagfound := p.htmlFindTag(data[1:]) + + // handle special cases + if !tagfound { + // check for an HTML comment + if size := p.htmlComment(out, data, doRender); size > 0 { + return size + } + + // check for an
tag + if size := p.htmlHr(out, data, doRender); size > 0 { + return size + } + + // check for HTML CDATA + if size := p.htmlCDATA(out, data, doRender); size > 0 { + return size + } + + // no special case recognized + return 0 + } + + // look for an unindented matching closing tag + // followed by a blank line + found := false + /* + closetag := []byte("\n") + j = len(curtag) + 1 + for !found { + // scan for a closing tag at the beginning of a line + if skip := bytes.Index(data[j:], closetag); skip >= 0 { + j += skip + len(closetag) + } else { + break + } + + // see if it is the only thing on the line + if skip := p.isEmpty(data[j:]); skip > 0 { + // see if it is followed by a blank line/eof + j += skip + if j >= len(data) { + found = true + i = j + } else { + if skip := p.isEmpty(data[j:]); skip > 0 { + j += skip + found = true + i = j + } + } + } + } + */ + + // if not found, try a second pass looking for indented match + // but not if tag is "ins" or "del" (following original Markdown.pl) + if !found && curtag != "ins" && curtag != "del" { + i = 1 + for i < len(data) { + i++ + for i < len(data) && !(data[i-1] == '<' && data[i] == '/') { + i++ + } + + if i+2+len(curtag) >= len(data) { + break + } + + j = p.htmlFindEnd(curtag, data[i-1:]) + + if j > 0 { + i += j - 1 + found = true + break + } + } + } + + if !found { + return 0 + } + + // the end of the block has been found + if doRender { + // trim newlines + end := i + for end > 0 && data[end-1] == '\n' { + end-- + } + p.r.BlockHtml(out, data[:end]) + } + + return i +} + +func (p *parser) renderHTMLBlock(out *bytes.Buffer, data []byte, start int, doRender bool) int { + // html block needs to end with a blank line + if i := p.isEmpty(data[start:]); i > 0 { + size := start + i + if doRender { + // trim trailing newlines + end := size + for end > 0 && data[end-1] == '\n' { + end-- + } + p.r.BlockHtml(out, data[:end]) + } + return size + } + return 0 +} + +// HTML comment, lax form +func (p *parser) htmlComment(out 
*bytes.Buffer, data []byte, doRender bool) int { + i := p.inlineHTMLComment(out, data) + return p.renderHTMLBlock(out, data, i, doRender) +} + +// HTML CDATA section +func (p *parser) htmlCDATA(out *bytes.Buffer, data []byte, doRender bool) int { + const cdataTag = "') { + i++ + } + i++ + // no end-of-comment marker + if i >= len(data) { + return 0 + } + return p.renderHTMLBlock(out, data, i, doRender) +} + +// HR, which is the only self-closing block tag considered +func (p *parser) htmlHr(out *bytes.Buffer, data []byte, doRender bool) int { + if data[0] != '<' || (data[1] != 'h' && data[1] != 'H') || (data[2] != 'r' && data[2] != 'R') { + return 0 + } + if data[3] != ' ' && data[3] != '/' && data[3] != '>' { + // not an
tag after all; at least not a valid one + return 0 + } + + i := 3 + for data[i] != '>' && data[i] != '\n' { + i++ + } + + if data[i] == '>' { + return p.renderHTMLBlock(out, data, i+1, doRender) + } + + return 0 +} + +func (p *parser) htmlFindTag(data []byte) (string, bool) { + i := 0 + for isalnum(data[i]) { + i++ + } + key := string(data[:i]) + if _, ok := blockTags[key]; ok { + return key, true + } + return "", false +} + +func (p *parser) htmlFindEnd(tag string, data []byte) int { + // assume data[0] == '<' && data[1] == '/' already tested + + // check if tag is a match + closetag := []byte("") + if !bytes.HasPrefix(data, closetag) { + return 0 + } + i := len(closetag) + + // check that the rest of the line is blank + skip := 0 + if skip = p.isEmpty(data[i:]); skip == 0 { + return 0 + } + i += skip + skip = 0 + + if i >= len(data) { + return i + } + + if p.flags&EXTENSION_LAX_HTML_BLOCKS != 0 { + return i + } + if skip = p.isEmpty(data[i:]); skip == 0 { + // following line must be blank + return 0 + } + + return i + skip +} + +func (*parser) isEmpty(data []byte) int { + // it is okay to call isEmpty on an empty buffer + if len(data) == 0 { + return 0 + } + + var i int + for i = 0; i < len(data) && data[i] != '\n'; i++ { + if data[i] != ' ' && data[i] != '\t' { + return 0 + } + } + return i + 1 +} + +func (*parser) isHRule(data []byte) bool { + i := 0 + + // skip up to three spaces + for i < 3 && data[i] == ' ' { + i++ + } + + // look at the hrule char + if data[i] != '*' && data[i] != '-' && data[i] != '_' { + return false + } + c := data[i] + + // the whole line must be the char or whitespace + n := 0 + for data[i] != '\n' { + switch { + case data[i] == c: + n++ + case data[i] != ' ': + return false + } + i++ + } + + return n >= 3 +} + +// isFenceLine checks if there's a fence line (e.g., ``` or ``` go) at the beginning of data, +// and returns the end index if so, or 0 otherwise. It also returns the marker found. 
+// If syntax is not nil, it gets set to the syntax specified in the fence line. +// A final newline is mandatory to recognize the fence line, unless newlineOptional is true. +func isFenceLine(data []byte, info *string, oldmarker string, newlineOptional bool) (end int, marker string) { + i, size := 0, 0 + + // skip up to three spaces + for i < len(data) && i < 3 && data[i] == ' ' { + i++ + } + + // check for the marker characters: ~ or ` + if i >= len(data) { + return 0, "" + } + if data[i] != '~' && data[i] != '`' { + return 0, "" + } + + c := data[i] + + // the whole line must be the same char or whitespace + for i < len(data) && data[i] == c { + size++ + i++ + } + + // the marker char must occur at least 3 times + if size < 3 { + return 0, "" + } + marker = string(data[i-size : i]) + + // if this is the end marker, it must match the beginning marker + if oldmarker != "" && marker != oldmarker { + return 0, "" + } + + // TODO(shurcooL): It's probably a good idea to simplify the 2 code paths here + // into one, always get the info string, and discard it if the caller doesn't care. 
+ if info != nil { + infoLength := 0 + i = skipChar(data, i, ' ') + + if i >= len(data) { + if newlineOptional && i == len(data) { + return i, marker + } + return 0, "" + } + + infoStart := i + + if data[i] == '{' { + i++ + infoStart++ + + for i < len(data) && data[i] != '}' && data[i] != '\n' { + infoLength++ + i++ + } + + if i >= len(data) || data[i] != '}' { + return 0, "" + } + + // strip all whitespace at the beginning and the end + // of the {} block + for infoLength > 0 && isspace(data[infoStart]) { + infoStart++ + infoLength-- + } + + for infoLength > 0 && isspace(data[infoStart+infoLength-1]) { + infoLength-- + } + + i++ + } else { + for i < len(data) && !isverticalspace(data[i]) { + infoLength++ + i++ + } + } + + *info = strings.TrimSpace(string(data[infoStart : infoStart+infoLength])) + } + + i = skipChar(data, i, ' ') + if i >= len(data) { + if newlineOptional { + return i, marker + } + return 0, "" + } + if data[i] == '\n' { + i++ // Take newline into account + } + + return i, marker +} + +// fencedCodeBlock returns the end index if data contains a fenced code block at the beginning, +// or 0 otherwise. It writes to out if doRender is true, otherwise it has no side effects. +// If doRender is true, a final newline is mandatory to recognize the fenced code block. +func (p *parser) fencedCodeBlock(out *bytes.Buffer, data []byte, doRender bool) int { + var infoString string + beg, marker := isFenceLine(data, &infoString, "", false) + if beg == 0 || beg >= len(data) { + return 0 + } + + var work bytes.Buffer + + for { + // safe to assume beg < len(data) + + // check for the end of the code block + newlineOptional := !doRender + fenceEnd, _ := isFenceLine(data[beg:], nil, marker, newlineOptional) + if fenceEnd != 0 { + beg += fenceEnd + break + } + + // copy the current line + end := skipUntilChar(data, beg, '\n') + 1 + + // did we reach the end of the buffer without a closing marker? 
+ if end >= len(data) { + return 0 + } + + // verbatim copy to the working buffer + if doRender { + work.Write(data[beg:end]) + } + beg = end + } + + if doRender { + p.r.BlockCode(out, work.Bytes(), infoString) + } + + return beg +} + +func (p *parser) table(out *bytes.Buffer, data []byte) int { + var header bytes.Buffer + i, columns := p.tableHeader(&header, data) + if i == 0 { + return 0 + } + + var body bytes.Buffer + + for i < len(data) { + pipes, rowStart := 0, i + for ; data[i] != '\n'; i++ { + if data[i] == '|' { + pipes++ + } + } + + if pipes == 0 { + i = rowStart + break + } + + // include the newline in data sent to tableRow + i++ + p.tableRow(&body, data[rowStart:i], columns, false) + } + + p.r.Table(out, header.Bytes(), body.Bytes(), columns) + + return i +} + +// check if the specified position is preceded by an odd number of backslashes +func isBackslashEscaped(data []byte, i int) bool { + backslashes := 0 + for i-backslashes-1 >= 0 && data[i-backslashes-1] == '\\' { + backslashes++ + } + return backslashes&1 == 1 +} + +func (p *parser) tableHeader(out *bytes.Buffer, data []byte) (size int, columns []int) { + i := 0 + colCount := 1 + for i = 0; data[i] != '\n'; i++ { + if data[i] == '|' && !isBackslashEscaped(data, i) { + colCount++ + } + } + + // doesn't look like a table header + if colCount == 1 { + return + } + + // include the newline in the data sent to tableRow + header := data[:i+1] + + // column count ignores pipes at beginning or end of line + if data[0] == '|' { + colCount-- + } + if i > 2 && data[i-1] == '|' && !isBackslashEscaped(data, i-1) { + colCount-- + } + + columns = make([]int, colCount) + + // move on to the header underline + i++ + if i >= len(data) { + return + } + + if data[i] == '|' && !isBackslashEscaped(data, i) { + i++ + } + i = skipChar(data, i, ' ') + + // each column header is of form: / *:?-+:? 
*|/ with # dashes + # colons >= 3 + // and trailing | optional on last column + col := 0 + for data[i] != '\n' { + dashes := 0 + + if data[i] == ':' { + i++ + columns[col] |= TABLE_ALIGNMENT_LEFT + dashes++ + } + for data[i] == '-' { + i++ + dashes++ + } + if data[i] == ':' { + i++ + columns[col] |= TABLE_ALIGNMENT_RIGHT + dashes++ + } + for data[i] == ' ' { + i++ + } + + // end of column test is messy + switch { + case dashes < 3: + // not a valid column + return + + case data[i] == '|' && !isBackslashEscaped(data, i): + // marker found, now skip past trailing whitespace + col++ + i++ + for data[i] == ' ' { + i++ + } + + // trailing junk found after last column + if col >= colCount && data[i] != '\n' { + return + } + + case (data[i] != '|' || isBackslashEscaped(data, i)) && col+1 < colCount: + // something else found where marker was required + return + + case data[i] == '\n': + // marker is optional for the last column + col++ + + default: + // trailing junk found after last column + return + } + } + if col != colCount { + return + } + + p.tableRow(out, header, columns, true) + size = i + 1 + return +} + +func (p *parser) tableRow(out *bytes.Buffer, data []byte, columns []int, header bool) { + i, col := 0, 0 + var rowWork bytes.Buffer + + if data[i] == '|' && !isBackslashEscaped(data, i) { + i++ + } + + for col = 0; col < len(columns) && i < len(data); col++ { + for data[i] == ' ' { + i++ + } + + cellStart := i + + for (data[i] != '|' || isBackslashEscaped(data, i)) && data[i] != '\n' { + i++ + } + + cellEnd := i + + // skip the end-of-cell marker, possibly taking us past end of buffer + i++ + + for cellEnd > cellStart && data[cellEnd-1] == ' ' { + cellEnd-- + } + + var cellWork bytes.Buffer + p.inline(&cellWork, data[cellStart:cellEnd]) + + if header { + p.r.TableHeaderCell(&rowWork, cellWork.Bytes(), columns[col]) + } else { + p.r.TableCell(&rowWork, cellWork.Bytes(), columns[col]) + } + } + + // pad it out with empty columns to get the right number + for ; col 
< len(columns); col++ { + if header { + p.r.TableHeaderCell(&rowWork, nil, columns[col]) + } else { + p.r.TableCell(&rowWork, nil, columns[col]) + } + } + + // silently ignore rows with too many cells + + p.r.TableRow(out, rowWork.Bytes()) +} + +// returns blockquote prefix length +func (p *parser) quotePrefix(data []byte) int { + i := 0 + for i < 3 && data[i] == ' ' { + i++ + } + if data[i] == '>' { + if data[i+1] == ' ' { + return i + 2 + } + return i + 1 + } + return 0 +} + +// blockquote ends with at least one blank line +// followed by something without a blockquote prefix +func (p *parser) terminateBlockquote(data []byte, beg, end int) bool { + if p.isEmpty(data[beg:]) <= 0 { + return false + } + if end >= len(data) { + return true + } + return p.quotePrefix(data[end:]) == 0 && p.isEmpty(data[end:]) == 0 +} + +// parse a blockquote fragment +func (p *parser) quote(out *bytes.Buffer, data []byte) int { + var raw bytes.Buffer + beg, end := 0, 0 + for beg < len(data) { + end = beg + // Step over whole lines, collecting them. 
While doing that, check for + // fenced code and if one's found, incorporate it altogether, + // irregardless of any contents inside it + for data[end] != '\n' { + if p.flags&EXTENSION_FENCED_CODE != 0 { + if i := p.fencedCodeBlock(out, data[end:], false); i > 0 { + // -1 to compensate for the extra end++ after the loop: + end += i - 1 + break + } + } + end++ + } + end++ + + if pre := p.quotePrefix(data[beg:]); pre > 0 { + // skip the prefix + beg += pre + } else if p.terminateBlockquote(data, beg, end) { + break + } + + // this line is part of the blockquote + raw.Write(data[beg:end]) + beg = end + } + + var cooked bytes.Buffer + p.block(&cooked, raw.Bytes()) + p.r.BlockQuote(out, cooked.Bytes()) + return end +} + +// returns prefix length for block code +func (p *parser) codePrefix(data []byte) int { + if data[0] == ' ' && data[1] == ' ' && data[2] == ' ' && data[3] == ' ' { + return 4 + } + return 0 +} + +func (p *parser) code(out *bytes.Buffer, data []byte) int { + var work bytes.Buffer + + i := 0 + for i < len(data) { + beg := i + for data[i] != '\n' { + i++ + } + i++ + + blankline := p.isEmpty(data[beg:i]) > 0 + if pre := p.codePrefix(data[beg:i]); pre > 0 { + beg += pre + } else if !blankline { + // non-empty, non-prefixed line breaks the pre + i = beg + break + } + + // verbatim copy to the working buffeu + if blankline { + work.WriteByte('\n') + } else { + work.Write(data[beg:i]) + } + } + + // trim all the \n off the end of work + workbytes := work.Bytes() + eol := len(workbytes) + for eol > 0 && workbytes[eol-1] == '\n' { + eol-- + } + if eol != len(workbytes) { + work.Truncate(eol) + } + + work.WriteByte('\n') + + p.r.BlockCode(out, work.Bytes(), "") + + return i +} + +// returns unordered list item prefix +func (p *parser) uliPrefix(data []byte) int { + i := 0 + + // start with up to 3 spaces + for i < 3 && data[i] == ' ' { + i++ + } + + // need a *, +, or - followed by a space + if (data[i] != '*' && data[i] != '+' && data[i] != '-') || + data[i+1] != 
' ' { + return 0 + } + return i + 2 +} + +// returns ordered list item prefix +func (p *parser) oliPrefix(data []byte) int { + i := 0 + + // start with up to 3 spaces + for i < 3 && data[i] == ' ' { + i++ + } + + // count the digits + start := i + for data[i] >= '0' && data[i] <= '9' { + i++ + } + + // we need >= 1 digits followed by a dot and a space + if start == i || data[i] != '.' || data[i+1] != ' ' { + return 0 + } + return i + 2 +} + +// returns definition list item prefix +func (p *parser) dliPrefix(data []byte) int { + i := 0 + + // need a : followed by a spaces + if data[i] != ':' || data[i+1] != ' ' { + return 0 + } + for data[i] == ' ' { + i++ + } + return i + 2 +} + +// parse ordered or unordered list block +func (p *parser) list(out *bytes.Buffer, data []byte, flags int) int { + i := 0 + flags |= LIST_ITEM_BEGINNING_OF_LIST + work := func() bool { + for i < len(data) { + skip := p.listItem(out, data[i:], &flags) + i += skip + + if skip == 0 || flags&LIST_ITEM_END_OF_LIST != 0 { + break + } + flags &= ^LIST_ITEM_BEGINNING_OF_LIST + } + return true + } + + p.r.List(out, work, flags) + return i +} + +// Parse a single list item. +// Assumes initial prefix is already removed if this is a sublist. 
+func (p *parser) listItem(out *bytes.Buffer, data []byte, flags *int) int { + // keep track of the indentation of the first line + itemIndent := 0 + for itemIndent < 3 && data[itemIndent] == ' ' { + itemIndent++ + } + + i := p.uliPrefix(data) + if i == 0 { + i = p.oliPrefix(data) + } + if i == 0 { + i = p.dliPrefix(data) + // reset definition term flag + if i > 0 { + *flags &= ^LIST_TYPE_TERM + } + } + if i == 0 { + // if in defnition list, set term flag and continue + if *flags&LIST_TYPE_DEFINITION != 0 { + *flags |= LIST_TYPE_TERM + } else { + return 0 + } + } + + // skip leading whitespace on first line + for data[i] == ' ' { + i++ + } + + // find the end of the line + line := i + for i > 0 && data[i-1] != '\n' { + i++ + } + + // process the following lines + containsBlankLine := false + sublist := 0 + codeBlockMarker := "" + if p.flags&EXTENSION_FENCED_CODE != 0 && i > line { + // determine if codeblock starts on the first line + _, codeBlockMarker = isFenceLine(data[line:i], nil, "", false) + } + + // get working buffer + var raw bytes.Buffer + + // put the first line into the working buffer + raw.Write(data[line:i]) + line = i + +gatherlines: + for line < len(data) { + i++ + + // find the end of this line + for data[i-1] != '\n' { + i++ + } + // if it is an empty line, guess that it is part of this item + // and move on to the next line + if p.isEmpty(data[line:i]) > 0 { + containsBlankLine = true + raw.Write(data[line:i]) + line = i + continue + } + + // calculate the indentation + indent := 0 + for indent < 4 && line+indent < i && data[line+indent] == ' ' { + indent++ + } + + chunk := data[line+indent : i] + + if p.flags&EXTENSION_FENCED_CODE != 0 { + // determine if in or out of codeblock + // if in codeblock, ignore normal list processing + _, marker := isFenceLine(chunk, nil, codeBlockMarker, false) + if marker != "" { + if codeBlockMarker == "" { + // start of codeblock + codeBlockMarker = marker + } else { + // end of codeblock. 
+ *flags |= LIST_ITEM_CONTAINS_BLOCK + codeBlockMarker = "" + } + } + // we are in a codeblock, write line, and continue + if codeBlockMarker != "" || marker != "" { + raw.Write(data[line+indent : i]) + line = i + continue gatherlines + } + } + + // evaluate how this line fits in + switch { + // is this a nested list item? + case (p.uliPrefix(chunk) > 0 && !p.isHRule(chunk)) || + p.oliPrefix(chunk) > 0 || + p.dliPrefix(chunk) > 0: + + if containsBlankLine { + // end the list if the type changed after a blank line + if indent <= itemIndent && + ((*flags&LIST_TYPE_ORDERED != 0 && p.uliPrefix(chunk) > 0) || + (*flags&LIST_TYPE_ORDERED == 0 && p.oliPrefix(chunk) > 0)) { + + *flags |= LIST_ITEM_END_OF_LIST + break gatherlines + } + *flags |= LIST_ITEM_CONTAINS_BLOCK + } + + // to be a nested list, it must be indented more + // if not, it is the next item in the same list + if indent <= itemIndent { + break gatherlines + } + + // is this the first item in the nested list? + if sublist == 0 { + sublist = raw.Len() + } + + // is this a nested prefix header? + case p.isPrefixHeader(chunk): + // if the header is not indented, it is not nested in the list + // and thus ends the list + if containsBlankLine && indent < 4 { + *flags |= LIST_ITEM_END_OF_LIST + break gatherlines + } + *flags |= LIST_ITEM_CONTAINS_BLOCK + + // anything following an empty line is only part + // of this item if it is indented 4 spaces + // (regardless of the indentation of the beginning of the item) + case containsBlankLine && indent < 4: + if *flags&LIST_TYPE_DEFINITION != 0 && i < len(data)-1 { + // is the next item still a part of this list? 
+ next := i + for data[next] != '\n' { + next++ + } + for next < len(data)-1 && data[next] == '\n' { + next++ + } + if i < len(data)-1 && data[i] != ':' && data[next] != ':' { + *flags |= LIST_ITEM_END_OF_LIST + } + } else { + *flags |= LIST_ITEM_END_OF_LIST + } + break gatherlines + + // a blank line means this should be parsed as a block + case containsBlankLine: + *flags |= LIST_ITEM_CONTAINS_BLOCK + } + + containsBlankLine = false + + // add the line into the working buffer without prefix + raw.Write(data[line+indent : i]) + + line = i + } + + // If reached end of data, the Renderer.ListItem call we're going to make below + // is definitely the last in the list. + if line >= len(data) { + *flags |= LIST_ITEM_END_OF_LIST + } + + rawBytes := raw.Bytes() + + // render the contents of the list item + var cooked bytes.Buffer + if *flags&LIST_ITEM_CONTAINS_BLOCK != 0 && *flags&LIST_TYPE_TERM == 0 { + // intermediate render of block item, except for definition term + if sublist > 0 { + p.block(&cooked, rawBytes[:sublist]) + p.block(&cooked, rawBytes[sublist:]) + } else { + p.block(&cooked, rawBytes) + } + } else { + // intermediate render of inline item + if sublist > 0 { + p.inline(&cooked, rawBytes[:sublist]) + p.block(&cooked, rawBytes[sublist:]) + } else { + p.inline(&cooked, rawBytes) + } + } + + // render the actual list item + cookedBytes := cooked.Bytes() + parsedEnd := len(cookedBytes) + + // strip trailing newlines + for parsedEnd > 0 && cookedBytes[parsedEnd-1] == '\n' { + parsedEnd-- + } + p.r.ListItem(out, cookedBytes[:parsedEnd], *flags) + + return line +} + +// render a single paragraph that has already been parsed out +func (p *parser) renderParagraph(out *bytes.Buffer, data []byte) { + if len(data) == 0 { + return + } + + // trim leading spaces + beg := 0 + for data[beg] == ' ' { + beg++ + } + + // trim trailing newline + end := len(data) - 1 + + // trim trailing spaces + for end > beg && data[end-1] == ' ' { + end-- + } + + work := func() bool { + 
p.inline(out, data[beg:end]) + return true + } + p.r.Paragraph(out, work) +} + +func (p *parser) paragraph(out *bytes.Buffer, data []byte) int { + // prev: index of 1st char of previous line + // line: index of 1st char of current line + // i: index of cursor/end of current line + var prev, line, i int + + // keep going until we find something to mark the end of the paragraph + for i < len(data) { + // mark the beginning of the current line + prev = line + current := data[i:] + line = i + + // did we find a blank line marking the end of the paragraph? + if n := p.isEmpty(current); n > 0 { + // did this blank line followed by a definition list item? + if p.flags&EXTENSION_DEFINITION_LISTS != 0 { + if i < len(data)-1 && data[i+1] == ':' { + return p.list(out, data[prev:], LIST_TYPE_DEFINITION) + } + } + + p.renderParagraph(out, data[:i]) + return i + n + } + + // an underline under some text marks a header, so our paragraph ended on prev line + if i > 0 { + if level := p.isUnderlinedHeader(current); level > 0 { + // render the paragraph + p.renderParagraph(out, data[:prev]) + + // ignore leading and trailing whitespace + eol := i - 1 + for prev < eol && data[prev] == ' ' { + prev++ + } + for eol > prev && data[eol-1] == ' ' { + eol-- + } + + // render the header + // this ugly double closure avoids forcing variables onto the heap + work := func(o *bytes.Buffer, pp *parser, d []byte) func() bool { + return func() bool { + pp.inline(o, d) + return true + } + }(out, p, data[prev:eol]) + + id := "" + if p.flags&EXTENSION_AUTO_HEADER_IDS != 0 { + id = SanitizedAnchorName(string(data[prev:eol])) + } + + p.r.Header(out, work, level, id) + + // find the end of the underline + for data[i] != '\n' { + i++ + } + return i + } + } + + // if the next line starts a block of HTML, then the paragraph ends here + if p.flags&EXTENSION_LAX_HTML_BLOCKS != 0 { + if data[i] == '<' && p.html(out, current, false) > 0 { + // rewind to before the HTML block + p.renderParagraph(out, data[:i]) + 
return i + } + } + + // if there's a prefixed header or a horizontal rule after this, paragraph is over + if p.isPrefixHeader(current) || p.isHRule(current) { + p.renderParagraph(out, data[:i]) + return i + } + + // if there's a fenced code block, paragraph is over + if p.flags&EXTENSION_FENCED_CODE != 0 { + if p.fencedCodeBlock(out, current, false) > 0 { + p.renderParagraph(out, data[:i]) + return i + } + } + + // if there's a definition list item, prev line is a definition term + if p.flags&EXTENSION_DEFINITION_LISTS != 0 { + if p.dliPrefix(current) != 0 { + return p.list(out, data[prev:], LIST_TYPE_DEFINITION) + } + } + + // if there's a list after this, paragraph is over + if p.flags&EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK != 0 { + if p.uliPrefix(current) != 0 || + p.oliPrefix(current) != 0 || + p.quotePrefix(current) != 0 || + p.codePrefix(current) != 0 { + p.renderParagraph(out, data[:i]) + return i + } + } + + // otherwise, scan to the beginning of the next line + for data[i] != '\n' { + i++ + } + i++ + } + + p.renderParagraph(out, data[:i]) + return i +} + +// SanitizedAnchorName returns a sanitized anchor name for the given text. +// +// It implements the algorithm specified in the package comment. +func SanitizedAnchorName(text string) string { + var anchorName []rune + futureDash := false + for _, r := range text { + switch { + case unicode.IsLetter(r) || unicode.IsNumber(r): + if futureDash && len(anchorName) > 0 { + anchorName = append(anchorName, '-') + } + futureDash = false + anchorName = append(anchorName, unicode.ToLower(r)) + default: + futureDash = true + } + } + return string(anchorName) +} diff --git a/vendor/github.com/russross/blackfriday/doc.go b/vendor/github.com/russross/blackfriday/doc.go new file mode 100644 index 000000000..9656c42a1 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/doc.go @@ -0,0 +1,32 @@ +// Package blackfriday is a Markdown processor. 
+// +// It translates plain text with simple formatting rules into HTML or LaTeX. +// +// Sanitized Anchor Names +// +// Blackfriday includes an algorithm for creating sanitized anchor names +// corresponding to a given input text. This algorithm is used to create +// anchors for headings when EXTENSION_AUTO_HEADER_IDS is enabled. The +// algorithm is specified below, so that other packages can create +// compatible anchor names and links to those anchors. +// +// The algorithm iterates over the input text, interpreted as UTF-8, +// one Unicode code point (rune) at a time. All runes that are letters (category L) +// or numbers (category N) are considered valid characters. They are mapped to +// lower case, and included in the output. All other runes are considered +// invalid characters. Invalid characters that preceed the first valid character, +// as well as invalid character that follow the last valid character +// are dropped completely. All other sequences of invalid characters +// between two valid characters are replaced with a single dash character '-'. +// +// SanitizedAnchorName exposes this functionality, and can be used to +// create compatible links to the anchor names generated by blackfriday. +// This algorithm is also implemented in a small standalone package at +// github.com/shurcooL/sanitized_anchor_name. It can be useful for clients +// that want a small package and don't need full functionality of blackfriday. +package blackfriday + +// NOTE: Keep Sanitized Anchor Name algorithm in sync with package +// github.com/shurcooL/sanitized_anchor_name. +// Otherwise, users of sanitized_anchor_name will get anchor names +// that are incompatible with those generated by blackfriday. 
diff --git a/vendor/github.com/russross/blackfriday/html.go b/vendor/github.com/russross/blackfriday/html.go new file mode 100644 index 000000000..fa044ca21 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/html.go @@ -0,0 +1,945 @@ +// +// Blackfriday Markdown Processor +// Available at http://github.com/russross/blackfriday +// +// Copyright © 2011 Russ Ross . +// Distributed under the Simplified BSD License. +// See README.md for details. +// + +// +// +// HTML rendering backend +// +// + +package blackfriday + +import ( + "bytes" + "fmt" + "regexp" + "strconv" + "strings" +) + +// Html renderer configuration options. +const ( + HTML_SKIP_HTML = 1 << iota // skip preformatted HTML blocks + HTML_SKIP_STYLE // skip embedded