From 608c11d2f03dea0489df874995fcc106c540d3a9 Mon Sep 17 00:00:00 2001 From: "Nadgowda, Shripad" Date: Thu, 19 Dec 2024 16:18:12 -0600 Subject: [PATCH] terraform for itac starts here Signed-off-by: Nadgowda, Shripad --- .copywrite.hcl | 21 + GNUmakefile | 6 + README.md | 66 ++ docs/data-sources/example.md | 30 + docs/functions/example.md | 26 + docs/index.md | 26 + docs/resources/example.md | 31 + examples/datasource/main.tf | 32 + .../gen-ai-xeon-opea-chatqna/cloud_init.yaml | 16 + examples/gen-ai-xeon-opea-chatqna/main.tf | 52 ++ .../gen-ai-xeon-opea-chatqna/terraform.tfvars | 8 + .../gen-ai-xeon-opea-chatqna/variables.tf | 80 +++ .../gen-ai-xeon-opea-codegen/cloud_init.yaml | 16 + examples/gen-ai-xeon-opea-codegen/main.tf | 52 ++ .../gen-ai-xeon-opea-codegen/terraform.tfvars | 8 + .../gen-ai-xeon-opea-codegen/variables.tf | 80 +++ examples/provider/provider.tf | 3 + examples/resources/all_resources/main.tf | 126 ++++ .../resources/all_resources/terraform.tfvars | 5 + examples/resources/all_resources/variables.tf | 62 ++ examples/resources/bucket_users/main.tf | 45 ++ examples/resources/buckets/main.tf | 20 + examples/resources/file_vm/main.tf | 60 ++ examples/resources/file_vm/terraform.tfvars | 8 + examples/resources/file_vm/variables.tf | 73 ++ examples/resources/filesystems/create/main.tf | 22 + examples/resources/filesystems/list/main.tf | 18 + examples/resources/iks/create-prod/main.tf | 61 ++ examples/resources/iks/create/main.tf | 72 ++ examples/resources/iks/list/main.tf | 17 + examples/resources/iks/nodegroup/main.tf | 37 ++ examples/resources/instances/create/main.tf | 34 + go.mod | 74 +++ go.sum | 241 +++++++ internal/models/common.go | 10 + internal/models/images.go | 10 + internal/models/instance.go | 79 +++ internal/models/instance_types.go | 9 + internal/models/kubernetes.go | 107 +++ internal/models/storages.go | 88 +++ internal/provider/filesystem_resource.go | 347 ++++++++++ internal/provider/filesystems_data_source.go | 197 ++++++ 
internal/provider/iks_cluster_resource.go | 325 +++++++++ internal/provider/iks_data_source.go | 290 ++++++++ .../provider/iks_load_balancer_resource.go | 182 +++++ internal/provider/iks_node_group_resource.go | 228 +++++++ internal/provider/instance_data_source.go | 238 +++++++ internal/provider/instance_resource.go | 442 ++++++++++++ .../provider/instance_types_data_source.go | 107 +++ .../provider/machine_images_data_source.go | 219 ++++++ internal/provider/objectstore_resource.go | 279 ++++++++ .../provider/objectstore_user_resource.go | 282 ++++++++ internal/provider/provider.go | 220 ++++++ internal/provider/provider_test.go | 25 + internal/provider/sshkey_data_source.go | 148 +++++ internal/provider/sshkey_resource.go | 210 ++++++ internal/provider/utils.go | 5 + main.go | 56 ++ pkg/itacservices/client.go | 103 +++ pkg/itacservices/common/httpclient.go | 115 ++++ pkg/itacservices/common/utils.go | 37 ++ pkg/itacservices/data_sources.go | 100 +++ pkg/itacservices/filesystems.go | 287 ++++++++ pkg/itacservices/instances.go | 356 ++++++++++ pkg/itacservices/kubernetes.go | 627 ++++++++++++++++++ pkg/itacservices/object_storage.go | 322 +++++++++ pkg/itacservices/sshkeys.go | 180 +++++ tools/tools.go | 11 + 68 files changed, 7769 insertions(+) create mode 100644 .copywrite.hcl create mode 100644 GNUmakefile create mode 100644 README.md create mode 100644 docs/data-sources/example.md create mode 100644 docs/functions/example.md create mode 100644 docs/index.md create mode 100644 docs/resources/example.md create mode 100644 examples/datasource/main.tf create mode 100644 examples/gen-ai-xeon-opea-chatqna/cloud_init.yaml create mode 100644 examples/gen-ai-xeon-opea-chatqna/main.tf create mode 100644 examples/gen-ai-xeon-opea-chatqna/terraform.tfvars create mode 100644 examples/gen-ai-xeon-opea-chatqna/variables.tf create mode 100644 examples/gen-ai-xeon-opea-codegen/cloud_init.yaml create mode 100644 examples/gen-ai-xeon-opea-codegen/main.tf create mode 100644 
examples/gen-ai-xeon-opea-codegen/terraform.tfvars create mode 100644 examples/gen-ai-xeon-opea-codegen/variables.tf create mode 100644 examples/provider/provider.tf create mode 100644 examples/resources/all_resources/main.tf create mode 100644 examples/resources/all_resources/terraform.tfvars create mode 100644 examples/resources/all_resources/variables.tf create mode 100644 examples/resources/bucket_users/main.tf create mode 100644 examples/resources/buckets/main.tf create mode 100644 examples/resources/file_vm/main.tf create mode 100644 examples/resources/file_vm/terraform.tfvars create mode 100644 examples/resources/file_vm/variables.tf create mode 100644 examples/resources/filesystems/create/main.tf create mode 100644 examples/resources/filesystems/list/main.tf create mode 100644 examples/resources/iks/create-prod/main.tf create mode 100644 examples/resources/iks/create/main.tf create mode 100644 examples/resources/iks/list/main.tf create mode 100644 examples/resources/iks/nodegroup/main.tf create mode 100644 examples/resources/instances/create/main.tf create mode 100644 go.mod create mode 100644 go.sum create mode 100644 internal/models/common.go create mode 100644 internal/models/images.go create mode 100644 internal/models/instance.go create mode 100644 internal/models/instance_types.go create mode 100644 internal/models/kubernetes.go create mode 100644 internal/models/storages.go create mode 100644 internal/provider/filesystem_resource.go create mode 100644 internal/provider/filesystems_data_source.go create mode 100644 internal/provider/iks_cluster_resource.go create mode 100644 internal/provider/iks_data_source.go create mode 100644 internal/provider/iks_load_balancer_resource.go create mode 100644 internal/provider/iks_node_group_resource.go create mode 100644 internal/provider/instance_data_source.go create mode 100644 internal/provider/instance_resource.go create mode 100644 internal/provider/instance_types_data_source.go create mode 100644 
internal/provider/machine_images_data_source.go create mode 100644 internal/provider/objectstore_resource.go create mode 100644 internal/provider/objectstore_user_resource.go create mode 100644 internal/provider/provider.go create mode 100644 internal/provider/provider_test.go create mode 100644 internal/provider/sshkey_data_source.go create mode 100644 internal/provider/sshkey_resource.go create mode 100644 internal/provider/utils.go create mode 100644 main.go create mode 100644 pkg/itacservices/client.go create mode 100644 pkg/itacservices/common/httpclient.go create mode 100644 pkg/itacservices/common/utils.go create mode 100644 pkg/itacservices/data_sources.go create mode 100644 pkg/itacservices/filesystems.go create mode 100644 pkg/itacservices/instances.go create mode 100644 pkg/itacservices/kubernetes.go create mode 100644 pkg/itacservices/object_storage.go create mode 100644 pkg/itacservices/sshkeys.go create mode 100644 tools/tools.go diff --git a/.copywrite.hcl b/.copywrite.hcl new file mode 100644 index 0000000..bdf3892 --- /dev/null +++ b/.copywrite.hcl @@ -0,0 +1,21 @@ +# NOTE: This file is for HashiCorp specific licensing automation and can be deleted after creating a new repo with this template. +schema_version = 1 + +project { + license = "MPL-2.0" + copyright_year = 2021 + + header_ignore = [ + # examples used within documentation (prose) + "examples/**", + + # GitHub issue template configuration + ".github/ISSUE_TEMPLATE/*.yml", + + # golangci-lint tooling configuration + ".golangci.yml", + + # GoReleaser tooling configuration + ".goreleaser.yml", + ] +} diff --git a/GNUmakefile b/GNUmakefile new file mode 100644 index 0000000..7771cd6 --- /dev/null +++ b/GNUmakefile @@ -0,0 +1,6 @@ +default: testacc + +# Run acceptance tests +.PHONY: testacc +testacc: + TF_ACC=1 go test ./... 
-v $(TESTARGS) -timeout 120m diff --git a/README.md b/README.md new file mode 100644 index 0000000..65b3ab3 --- /dev/null +++ b/README.md @@ -0,0 +1,66 @@ +# IDC Terraform Provider (Terraform Plugin Framework) + +This IDC provider plugin brings the power of Hashicorp's Terraform to Intel Developer Cloud (IDC). It allows developers to model and manage their IDC Resources through HCL IaaC (Infrastructure as a Code). + +## Requirements + +- [Terraform](https://developer.hashicorp.com/terraform/downloads) >= 1.0 +- [Go](https://golang.org/doc/install) >= 1.21 + +## Building The Provider + +1. Clone the repository +1. Enter the repository directory +1. Build the provider using the Go `install` command: + +```shell +go install +``` + +## Using the provider + +Currently, this plugin is not published to terraform registry and is available to be used in Local Dev mode locally. + +## Trying out the IDC Provider + +If you wish to work on the provider, you'll first need [Go](http://www.golang.org) installed on your machine (see [Requirements](#requirements) above). + +To compile the provider, run `go install`. This will build the provider and put the provider binary in the `$GOPATH/bin` directory. + +For local develoment, update the terraform config to point it to the local copy of the provider plugin. + +Edit `~/.terraformrc` file and add following config block + +``` +provider_installation { + + dev_overrides { + "cloud.intel.com/services/idc" = "<$GOPATH>/bin" + } + + # For all other providers, install them directly from their origin provider + # registries as normal. If you omit this, Terraform will _only_ use + # the dev_overrides block, and so no other providers will be available. + direct {} +} +``` + +## IDC Login Credentials +For creating resources on IDC, it requires auth credentials. More specifically, currently it requires following `two` environment variables to be configured. 
+ +``` +export IDC_CLOUDACCOUNT= +export IDC_APITOKEN= +``` + +You can optionally, download and setup the following CLI tool to fetch it automatically. + +[IRR Binary Download](https://github.com/intel-innersource/applications.web.saas.optimization-registry.api/releases/tag/v0.23.5) + +``` +response=$(irr_darwin idc login --interactive --json) +IDC_CLOUDACCOUNT=$(echo $response | jq -r ".account_id") +IDC_APITOKEN=$(echo $response | jq -r ".tokens.access_token") +``` + +## Next Steps diff --git a/docs/data-sources/example.md b/docs/data-sources/example.md new file mode 100644 index 0000000..b19c8a0 --- /dev/null +++ b/docs/data-sources/example.md @@ -0,0 +1,30 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "scaffolding_example Data Source - scaffolding" +subcategory: "" +description: |- + Example data source +--- + +# scaffolding_example (Data Source) + +Example data source + +## Example Usage + +```terraform +data "scaffolding_example" "example" { + configurable_attribute = "some-value" +} +``` + + +## Schema + +### Optional + +- `configurable_attribute` (String) Example configurable attribute + +### Read-Only + +- `id` (String) Example identifier diff --git a/docs/functions/example.md b/docs/functions/example.md new file mode 100644 index 0000000..c65087d --- /dev/null +++ b/docs/functions/example.md @@ -0,0 +1,26 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "example function - scaffolding" +subcategory: "" +description: |- + Example function +--- + +# function: example + +Echoes given argument as result + + + +## Signature + + +```text +example(input string) string +``` + +## Arguments + + +1. 
`input` (String) String to echo + diff --git a/docs/index.md b/docs/index.md new file mode 100644 index 0000000..7458d6d --- /dev/null +++ b/docs/index.md @@ -0,0 +1,26 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "scaffolding Provider" +subcategory: "" +description: |- + +--- + +# scaffolding Provider + + + +## Example Usage + +```terraform +provider "scaffolding" { + # example configuration here +} +``` + + +## Schema + +### Optional + +- `endpoint` (String) Example provider attribute diff --git a/docs/resources/example.md b/docs/resources/example.md new file mode 100644 index 0000000..5f3d5ca --- /dev/null +++ b/docs/resources/example.md @@ -0,0 +1,31 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "scaffolding_example Resource - scaffolding" +subcategory: "" +description: |- + Example resource +--- + +# scaffolding_example (Resource) + +Example resource + +## Example Usage + +```terraform +resource "scaffolding_example" "example" { + configurable_attribute = "some-value" +} +``` + + +## Schema + +### Optional + +- `configurable_attribute` (String) Example configurable attribute +- `defaulted` (String) Example configurable attribute with default value + +### Read-Only + +- `id` (String) Example identifier diff --git a/examples/datasource/main.tf b/examples/datasource/main.tf new file mode 100644 index 0000000..88028bf --- /dev/null +++ b/examples/datasource/main.tf @@ -0,0 +1,32 @@ +terraform { + required_providers { + idc = { + source = "hashicorps/idc" + } + } +} + + +provider "idc" { + region = "us-region-1" +} + +data "idc_machine_images" "images" { + most_recent = true + filters = [ + { + name = "name" + values = ["ubuntu-2204-jammy"] + } + ] +} + +# data "idc_instance_types" "insttypes" {} + +output "print_images" { + value = data.idc_machine_images.images +} + +# output "print_insttypes" { +# value = data.idc_instance_types.insttypes +# } \ No newline at end of file diff 
--git a/examples/gen-ai-xeon-opea-chatqna/cloud_init.yaml b/examples/gen-ai-xeon-opea-chatqna/cloud_init.yaml new file mode 100644 index 0000000..c22a944 --- /dev/null +++ b/examples/gen-ai-xeon-opea-chatqna/cloud_init.yaml @@ -0,0 +1,16 @@ +#cloud-config +package_update: true +package_upgrade: true + +package: + - git + +runcmd: + - apt install ansible -y + - git clone https://github.com/intel/optimized-cloud-recipes.git /tmp/optimized-cloud-recipes + - cd /tmp/optimized-cloud-recipes/recipes/ai-opea-codegen-xeon + - cp opea.sh /etc/profile.d/opea.sh + - echo 'export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}' | sudo tee -a /etc/profile.d/opea.sh + - chmod +x /etc/profile.d/opea.sh + - source /etc/profile.d/opea.sh + - ansible-playbook recipe.yml diff --git a/examples/gen-ai-xeon-opea-chatqna/main.tf b/examples/gen-ai-xeon-opea-chatqna/main.tf new file mode 100644 index 0000000..d6a0897 --- /dev/null +++ b/examples/gen-ai-xeon-opea-chatqna/main.tf @@ -0,0 +1,52 @@ +terraform { + required_providers { + idc = { + source = "hashicorps/idc" + } + } +} + +provider "idc" { + region = var.idc_region +} + +# data "cloudinit_config" "ansible" { +# gzip = true +# base64_encode = true + +# part { +# filename = "cloud_init" +# content_type = "text/cloud-config" +# content = templatefile( +# "cloud_init.yml", +# { +# HUGGINGFACEHUB_API_TOKEN=var.huggingface_token +# } +# ) +# } +# } + +# resource "idc_sshkey" "example" { +# metadata = { +# name = var.ssh_key_name +# } +# spec = { +# ssh_public_key = file(var.ssh_pubkey_path) +# owner_email = var.ssh_user_email +# } +# } + +resource "idc_instance" "example" { + name = var.instance_name + spec = { + instance_type = var.instance_types[var.instance_type] + machine_image = var.os_image + ssh_public_key_names = [var.ssh_key_name] + user_data = file("./cloud_init.yaml") + } + # depends_on = [idc_sshkey.example] +} + +output "instance_order" { + value = idc_instance.example +} diff --git 
a/examples/gen-ai-xeon-opea-chatqna/terraform.tfvars b/examples/gen-ai-xeon-opea-chatqna/terraform.tfvars new file mode 100644 index 0000000..2e34746 --- /dev/null +++ b/examples/gen-ai-xeon-opea-chatqna/terraform.tfvars @@ -0,0 +1,8 @@ +idc_region = "us-staging-1" +ssh_key_name = "shrimac" +instance_name = "genai-chatqna-demo3" +ssh_pubkey_path = "/Users/snadgowd/.ssh/id_ed25519.pub" +ssh_user_email = "shripad.nadgowda@intel.com" +instance_type = "vm-large" +filesystem_name = "shri-fs6" +filesystem_size_in_tb = 1 diff --git a/examples/gen-ai-xeon-opea-chatqna/variables.tf b/examples/gen-ai-xeon-opea-chatqna/variables.tf new file mode 100644 index 0000000..e329942 --- /dev/null +++ b/examples/gen-ai-xeon-opea-chatqna/variables.tf @@ -0,0 +1,80 @@ +variable "ssh_key_name" { + type = string +} + +variable "ssh_pubkey_path" { + type = string +} + +variable "ssh_user_email" { + type = string +} + +variable "instance_name" { + type = string +} + +variable "filesystem_description" { + type = string + default = "demo filesystem" +} + +variable "filesystem_name" { + type = string +} + +variable "filesystem_size_in_tb" { + type = number +} + +variable "filesystem_type" { + type = string + default = "ComputeGeneral" +} + +variable "idc_region" { + type = string + default = "us-region-2" +} + +variable "idc_availability_zone" { + type = string + default = "us-region-2a" +} + +variable "os_image" { + type = string + default = "ubuntu-2204-jammy-v20230122" +} + +variable instance_interface_spec { + type = map + default = { + "name" = "eth0" + "vnet" = "us-region-2a-default" + } +} + +variable instance_types { + type = map + default = { + "vm-small" = "vm-spr-sml" + "vm-large" = "vm-spr-lrg" + } +} + +variable instance_type { + type = string +} + +variable instance_count { + type = number + default = 1 +} + +# Variable for Huggingface Token +variable "huggingface_token" { + description = "Huggingface Token" + default = "" + type = string +} \ No newline at end of file diff --git 
a/examples/gen-ai-xeon-opea-codegen/cloud_init.yaml b/examples/gen-ai-xeon-opea-codegen/cloud_init.yaml new file mode 100644 index 0000000..c22a944 --- /dev/null +++ b/examples/gen-ai-xeon-opea-codegen/cloud_init.yaml @@ -0,0 +1,16 @@ +#cloud-config +package_update: true +package_upgrade: true + +package: + - git + +runcmd: + - apt install ansible -y + - git clone https://github.com/intel/optimized-cloud-recipes.git /tmp/optimized-cloud-recipes + - cd /tmp/optimized-cloud-recipes/recipes/ai-opea-codegen-xeon + - cp opea.sh /etc/profile.d/opea.sh + - echo 'export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}' | sudo tee -a /etc/profile.d/opea.sh + - chmod +x /etc/profile.d/opea.sh + - source /etc/profile.d/opea.sh + - ansible-playbook recipe.yml diff --git a/examples/gen-ai-xeon-opea-codegen/main.tf b/examples/gen-ai-xeon-opea-codegen/main.tf new file mode 100644 index 0000000..d6a0897 --- /dev/null +++ b/examples/gen-ai-xeon-opea-codegen/main.tf @@ -0,0 +1,52 @@ +terraform { + required_providers { + idc = { + source = "hashicorps/idc" + } + } +} + +provider "idc" { + region = var.idc_region +} + +# data "cloudinit_config" "ansible" { +# gzip = true +# base64_encode = true + +# part { +# filename = "cloud_init" +# content_type = "text/cloud-config" +# content = templatefile( +# "cloud_init.yml", +# { +# HUGGINGFACEHUB_API_TOKEN=var.huggingface_token +# } +# ) +# } +# } + +# resource "idc_sshkey" "example" { +# metadata = { +# name = var.ssh_key_name +# } +# spec = { +# ssh_public_key = file(var.ssh_pubkey_path) +# owner_email = var.ssh_user_email +# } +# } + +resource "idc_instance" "example" { + name = var.instance_name + spec = { + instance_type = var.instance_types[var.instance_type] + machine_image = var.os_image + ssh_public_key_names = [var.ssh_key_name] + user_data = file("./cloud_init.yaml") + } + # depends_on = [idc_sshkey.example] +} + +output "instance_order" { + value = idc_instance.example +} diff --git 
a/examples/gen-ai-xeon-opea-codegen/terraform.tfvars b/examples/gen-ai-xeon-opea-codegen/terraform.tfvars new file mode 100644 index 0000000..eec7456 --- /dev/null +++ b/examples/gen-ai-xeon-opea-codegen/terraform.tfvars @@ -0,0 +1,8 @@ +idc_region = "us-staging-1" +ssh_key_name = "shrimac" +instance_name = "genai-codegen-demo3" +ssh_pubkey_path = "/Users/snadgowd/.ssh/id_ed25519.pub" +ssh_user_email = "" +instance_type = "vm-large" +filesystem_name = "shri-fs6" +filesystem_size_in_tb = 1 diff --git a/examples/gen-ai-xeon-opea-codegen/variables.tf b/examples/gen-ai-xeon-opea-codegen/variables.tf new file mode 100644 index 0000000..e329942 --- /dev/null +++ b/examples/gen-ai-xeon-opea-codegen/variables.tf @@ -0,0 +1,80 @@ +variable "ssh_key_name" { + type = string +} + +variable "ssh_pubkey_path" { + type = string +} + +variable "ssh_user_email" { + type = string +} + +variable "instance_name" { + type = string +} + +variable "filesystem_description" { + type = string + default = "demo filesystem" +} + +variable "filesystem_name" { + type = string +} + +variable "filesystem_size_in_tb" { + type = number +} + +variable "filesystem_type" { + type = string + default = "ComputeGeneral" +} + +variable "idc_region" { + type = string + default = "us-region-2" +} + +variable "idc_availability_zone" { + type = string + default = "us-region-2a" +} + +variable "os_image" { + type = string + default = "ubuntu-2204-jammy-v20230122" +} + +variable instance_interface_spec { + type = map + default = { + "name" = "eth0" + "vnet" = "us-region-2a-default" + } +} + +variable instance_types { + type = map + default = { + "vm-small" = "vm-spr-sml" + "vm-large" = "vm-spr-lrg" + } +} + +variable instance_type { + type = string +} + +variable instance_count { + type = number + default = 1 +} + +# Variable for Huggingface Token +variable "huggingface_token" { + description = "Huggingface Token" + default = "" + type = string +} \ No newline at end of file diff --git 
a/examples/provider/provider.tf b/examples/provider/provider.tf new file mode 100644 index 0000000..942db45 --- /dev/null +++ b/examples/provider/provider.tf @@ -0,0 +1,3 @@ +provider "scaffolding" { + # example configuration here +} diff --git a/examples/resources/all_resources/main.tf b/examples/resources/all_resources/main.tf new file mode 100644 index 0000000..3c62ada --- /dev/null +++ b/examples/resources/all_resources/main.tf @@ -0,0 +1,126 @@ +terraform { + required_providers { + idc = { + source = "hashicorps/idc" + } + } +} + +provider "idc" { + region = var.idc_region +} + +locals { + name = "apollo11" + tags = { + environment = "Demo" + } +} + +data "idc_machine_images" "image" { + most_recent = true + filters = [ + { + name = "name" + values = ["ubuntu-2204-jammy"] + } + ] +} + +resource "idc_sshkey" "sshkey-1" { + metadata = { + name = "${local.name}-sshkey" + } + spec = { + ssh_public_key = file(var.ssh_pubkey_path) + owner_email = var.ssh_user_email + } +} + +resource "idc_instance" "myinstance-1" { + async = false + name = "${local.name}-instance" + availability_zone = var.idc_availability_zone + spec = { + instance_type = var.instance_types[var.instance_type] + machine_image = data.idc_machine_images.image.result.name + interface_specs = [{ + name = var.instance_interface_spec.name + vnet = var.instance_interface_spec.vnet + }] + ssh_public_key_names = [idc_sshkey.sshkey-1.metadata.name] + } + depends_on = [idc_sshkey.sshkey-1] +} + +resource "idc_filesystem" "fsvol-1" { + name = "${local.name}-filevol" + description = var.filesystem_description + availability_zone = var.idc_availability_zone + spec = { + size_in_gb = var.filesystem_size_in_gb + filesystem_type = var.filesystem_type + } +} + +resource "idc_object_storage_bucket" "bucket1" { + name = "${local.name}-bucket" + versioned = false +} + +resource "idc_object_storage_bucket_user" "user1" { + name = "${idc_object_storage_bucket.bucket1.name}-user" + bucket_id = 
"${idc_object_storage_bucket.bucket1.cloudaccount}-${idc_object_storage_bucket.bucket1.name}" + allow_actions = [ + "GetBucketLocation", + "GetBucketPolicy", + "ListBucket", + "ListBucketMultipartUploads", + "ListMultipartUploadParts", + "GetBucketTagging", + ] + allow_policies = { + path_prefix = "/" + policies = [ + "ReadBucket", + "WriteBucket", + "DeleteBucket", + ] + } +} + +resource "idc_iks_cluster" "cluster1" { + async = false + name = "${local.name}-iks" + availability_zone = var.idc_availability_zone + kubernetes_version = "1.27" + + storage = { + size_in_gb = 30 + } +} + +resource "idc_iks_node_group" "ng1" { + cluster_uuid = idc_iks_cluster.cluster1.id + name = "${local.name}-ng" + node_count = 1 + node_type = var.instance_types[var.instance_type] + userdata_url = "" + ssh_public_key_names = [idc_sshkey.sshkey-1.metadata.name] + interfaces = [{ + name = var.idc_availability_zone + vnet = var.instance_interface_spec.vnet + }] +} + +resource "idc_iks_lb" "lb1" { + cluster_uuid = idc_iks_cluster.cluster1.id + load_balancers = [ + { + name = "${local.name}-lb-pub2" + port = 80 + vip_type = "public" + } + ] + depends_on = [ idc_iks_node_group.ng1 ] +} diff --git a/examples/resources/all_resources/terraform.tfvars b/examples/resources/all_resources/terraform.tfvars new file mode 100644 index 0000000..0860ce1 --- /dev/null +++ b/examples/resources/all_resources/terraform.tfvars @@ -0,0 +1,5 @@ +idc_region = "us-region-2" +ssh_pubkey_path = "/Users/snadgowd/.ssh/id_ed25519.pub" +ssh_user_email = "shripad.nadgowda@intel.com" +instance_type = "vm-small" +filesystem_size_in_gb = 90 diff --git a/examples/resources/all_resources/variables.tf b/examples/resources/all_resources/variables.tf new file mode 100644 index 0000000..15408e6 --- /dev/null +++ b/examples/resources/all_resources/variables.tf @@ -0,0 +1,62 @@ +variable "ssh_pubkey_path" { + type = string +} + +variable "ssh_user_email" { + type = string +} + +variable "filesystem_description" { + type = string + 
default = "demo filesystem" +} + + +variable "filesystem_size_in_gb" { + type = number +} + +variable "filesystem_type" { + type = string + default = "ComputeGeneral" +} + +variable "idc_region" { + type = string + default = "region-2" +} + +variable "idc_availability_zone" { + type = string + default = "us-region-2a" +} + +variable "os_image" { + type = string + default = "ubuntu-2204-jammy-v20230122" +} + +variable instance_interface_spec { + type = map + default = { + "name" = "eth0" + "vnet" = "us-region-2a-default" + } +} + +variable instance_types { + type = map + default = { + "vm-small" = "vm-spr-sml" + "vm-large" = "vm-spr-lrg" + } +} + +variable instance_type { + type = string +} + +variable instance_count { + type = number + default = 1 +} \ No newline at end of file diff --git a/examples/resources/bucket_users/main.tf b/examples/resources/bucket_users/main.tf new file mode 100644 index 0000000..2848ba1 --- /dev/null +++ b/examples/resources/bucket_users/main.tf @@ -0,0 +1,45 @@ +terraform { + required_providers { + idc = { + source = "cloud.intel.com/services/idc" + } + } +} + +provider "idc" { + region = "staging-1" +} + +resource "idc_object_storage_bucket" "bucket1" { + name = "tf-demo-3" + versioned = false +} + +resource "idc_object_storage_bucket_user" "user1" { + name = "tf-demo3-user" + bucket_id = "${idc_object_storage_bucket.bucket1.cloudaccount}-${idc_object_storage_bucket.bucket1.name}" + allow_actions = [ + "GetBucketLocation", + "GetBucketPolicy", + "ListBucket", + "ListBucketMultipartUploads", + "ListMultipartUploadParts", + "GetBucketTagging", + ] + allow_policies = { + path_prefix = "/" + policies = [ + "ReadBucket", + "WriteBucket", + "DeleteBucket", + ] + } +} + +# output "bucket_order" { +# value = idc_object_storage_bucket.bucket1 +# } + +output "bucket_user" { + value = idc_object_storage_bucket_user.user1 +} diff --git a/examples/resources/buckets/main.tf b/examples/resources/buckets/main.tf new file mode 100644 index 
0000000..7865f86 --- /dev/null +++ b/examples/resources/buckets/main.tf @@ -0,0 +1,20 @@ +terraform { + required_providers { + idc = { + source = "hashicorps/idc" + } + } +} + +provider "idc" { + region = "us-region-2" +} + +resource "idc_object_storage_bucket" "bucket1" { + name = "tf-demo99" + versioned = false +} + +output "bucket_order" { + value = idc_object_storage_bucket.bucket1 +} diff --git a/examples/resources/file_vm/main.tf b/examples/resources/file_vm/main.tf new file mode 100644 index 0000000..f542ed1 --- /dev/null +++ b/examples/resources/file_vm/main.tf @@ -0,0 +1,60 @@ +terraform { + required_providers { + idc = { + source = "cloud.intel.com/services/idc" + } + } +} + +provider "idc" { + region = var.idc_region +} + +resource "idc_sshkey" "sshkey-1" { + metadata = { + name = var.ssh_key_name + } + spec = { + ssh_public_key = file(var.ssh_pubkey_path) + owner_email = var.ssh_user_email + } +} + +resource "idc_instance" "myinstance-1" { + instance = { + name = var.instance_name + spec = { + availability_zone = var.idc_availability_zone + instance_type = var.instance_types[var.instance_type] + machine_image = var.os_image + interface_specs = [{ + name = var.instance_interface_spec.name + vnet = var.instance_interface_spec.vnet + }] + ssh_public_key_names = [var.ssh_key_name] + } + } + depends_on = [idc_sshkey.sshkey-1] +} + +resource "idc_filesystem" "fsvol-1" { + filesystem = { + name = var.filesystem_name + description = var.filesystem_description + spec = { + size_in_gb = var.filesystem_size_in_gb + filesystem_type = var.filesystem_type + } + } +} + +resource "idc_object_storage_bucket" "bucket1" { + name = "tf-bucket-1" + versioned = false +} + + +output "filesystem_order" { + value = idc_filesystem.example +} + diff --git a/examples/resources/file_vm/terraform.tfvars b/examples/resources/file_vm/terraform.tfvars new file mode 100644 index 0000000..47da0de --- /dev/null +++ b/examples/resources/file_vm/terraform.tfvars @@ -0,0 +1,8 @@ +idc_region 
= "staging-1" +ssh_key_name = "shri-tfkey5" +instance_name = "shri-instance5" +ssh_pubkey_path = "/Users/snadgowd/.ssh/id_ed25519.pub" +ssh_user_email = "shripad.nadgowda@intel.com" +instance_type = "vm-small" +filesystem_name = "shri-fs6" +filesystem_size_in_gb = 90 diff --git a/examples/resources/file_vm/variables.tf b/examples/resources/file_vm/variables.tf new file mode 100644 index 0000000..9d4f574 --- /dev/null +++ b/examples/resources/file_vm/variables.tf @@ -0,0 +1,73 @@ +variable "ssh_key_name" { + type = string +} + +variable "ssh_pubkey_path" { + type = string +} + +variable "ssh_user_email" { + type = string +} + +variable "instance_name" { + type = string +} + +variable "filesystem_description" { + type = string + default = "demo filesystem" +} + +variable "filesystem_name" { + type = string +} + +variable "filesystem_size_in_gb" { + type = number +} + +variable "filesystem_type" { + type = string + default = "ComputeGeneral" +} + +variable "idc_region" { + type = string + default = "staging-1" +} + +variable "idc_availability_zone" { + type = string + default = "us-staging-1a" +} + +variable "os_image" { + type = string + default = "ubuntu-2204-jammy-v20230122" +} + +variable instance_interface_spec { + type = map + default = { + "name" = "eth0" + "vnet" = "us-staging-1a-default" + } +} + +variable instance_types { + type = map + default = { + "vm-small" = "vm-spr-sml" + "vm-large" = "vm-spr-lrg" + } +} + +variable instance_type { + type = string +} + +variable instance_count { + type = number + default = 1 +} \ No newline at end of file diff --git a/examples/resources/filesystems/create/main.tf b/examples/resources/filesystems/create/main.tf new file mode 100644 index 0000000..bf86364 --- /dev/null +++ b/examples/resources/filesystems/create/main.tf @@ -0,0 +1,22 @@ +terraform { + required_providers { + intelcloud = { + source = "hashicorps/intelcloud" + } + } +} + +provider "intelcloud" { + region = "us-staging-1" +} + +resource 
"intelcloud_filesystem" "example" { + name = "tf-demo79" + spec = { + size_in_tb = 1 + } +} + +output "filesystem_order" { + value = intelcloud_filesystem.example +} diff --git a/examples/resources/filesystems/list/main.tf b/examples/resources/filesystems/list/main.tf new file mode 100644 index 0000000..b74b3b3 --- /dev/null +++ b/examples/resources/filesystems/list/main.tf @@ -0,0 +1,18 @@ +terraform { + required_providers { + idc = { + source = "hashicorps/idc" + } + } +} + +provider "idc" { + region = "us-region-2" +} + +data "idc_filesystems" "example" {} + +output "test_storages" { + value = data.idc_filesystems.example +} + diff --git a/examples/resources/iks/create-prod/main.tf b/examples/resources/iks/create-prod/main.tf new file mode 100644 index 0000000..ad49095 --- /dev/null +++ b/examples/resources/iks/create-prod/main.tf @@ -0,0 +1,61 @@ +terraform { + required_providers { + idc = { + source = "cloud.intel.com/services/idc" + } + # random = { + # source = "hashicorp/random" + # version = "3.6.2" + # } + } +} + +provider "idc" { + region = "us-region-1" +} + +# provider "random" { +# # Configuration options +# } + +# resource "random_pet" "prefix" {} + +locals { + # name = "${random_pet.prefix.id}" + name = "testdemo99" + availability_zone = "us-region-1a" + tags = { + environment = "Demo" + } +} + + +resource "idc_kubernetes_cluster" "default" { + async = true + kubernetes_cluster = { + name = "${local.name}-iks" + availability_zone = local.availability_zone + kubernetes_version = "1.27" + node_pools = [ + { + name = "${local.name}-ng" + node_count = 2 + node_type = "vm-small" + user_data_url = "" + } + ] + ssh_public_key_names = ["var.ssh_key_name"] + storage = { + size_in_gb = 30 + } + load_balancer = { + name = "${local.name}-lb" + port = 443 + type = "public" + } + } +} + +output "iks_order" { + value = idc_kubernetes_cluster.default +} diff --git a/examples/resources/iks/create/main.tf b/examples/resources/iks/create/main.tf new file mode 100644 
index 0000000..87d2a0f --- /dev/null +++ b/examples/resources/iks/create/main.tf @@ -0,0 +1,72 @@ +terraform { + required_providers { + idc = { + source = "cloud.intel.com/services/idc" + } + # random = { + # source = "hashicorp/random" + # version = "3.6.2" + # } + } +} + +provider "idc" { + region = "us-region-1" +} + +# provider "random" { +# # Configuration options +# } + +# resource "random_pet" "prefix" {} + +locals { + # name = "${random_pet.prefix.id}" + name = "iks-tf-2" + availability_zone = "us-region-1a" + tags = { + environment = "Demo" + } +} + +resource "idc_iks_cluster" "cluster1" { + async = false + name = "${local.name}-iks" + availability_zone = local.availability_zone + kubernetes_version = "1.27" + + storage = { + size_in_gb = 30 + } +} + +resource "idc_iks_node_group" "ng1" { + cluster_uuid = idc_iks_cluster.cluster1.id + # cluster_uuid = "cl-ui2juj6vkq" + name = "${local.name}-ng" + node_count = 1 + node_type = "vm-spr-sml" + userdata_url = "" + ssh_public_key_names = ["shrimac"] + interfaces = [{ + name = "us-region-1a" + vnet = "us-region-1a-default" + }] +} + +resource "idc_iks_lb" "lb1" { + cluster_uuid = idc_iks_cluster.cluster1.id + # cluster_uuid = "cl-ui2juj6vkq" + load_balancers = [ + { + name = "${local.name}-lb-pub2" + port = 80 + vip_type = "public" + } + ] + depends_on = [ idc_iks_node_group.ng1 ] +} + +# output "iks_order" { +# value = idc_iks_cluster.cluster1 +# } diff --git a/examples/resources/iks/list/main.tf b/examples/resources/iks/list/main.tf new file mode 100644 index 0000000..b57bb1b --- /dev/null +++ b/examples/resources/iks/list/main.tf @@ -0,0 +1,17 @@ +terraform { + required_providers { + idc = { + source = "cloud.intel.com/services/idc" + } + } +} + +provider "idc" { + region = "staging-1" +} + +data "idc_kubernetes_clusters" "default" {} + +output "test_cluster" { + value = data.idc_kubernetes_clusters.default +} diff --git a/examples/resources/iks/nodegroup/main.tf b/examples/resources/iks/nodegroup/main.tf new 
file mode 100644 index 0000000..d27a3ad --- /dev/null +++ b/examples/resources/iks/nodegroup/main.tf @@ -0,0 +1,37 @@ +terraform { + required_providers { + idc = { + source = "cloud.intel.com/services/idc" + } + } +} + +provider "idc" { + region = "staging-3" +} + +locals { + # name = "${random_pet.prefix.id}" + name = "testdemo97" + availability_zone = "us-region-1a" + tags = { + environment = "Demo" + } +} + +resource "idc_iks_node_group" "ng1" { + cluster_uuid = "cl-lc2ze6pu4i" + name = "${local.name}-ng" + node_count = 2 + node_type = "vm-spr-sml" + userdata_url = "" + ssh_public_key_names = ["shrimac"] + interfaces = [{ + name = "us-staging-3a" + vnet = "us-staging-3a-default" + }] +} + +output "iks_order" { + value = idc_iks_node_group.ng1 +} diff --git a/examples/resources/instances/create/main.tf b/examples/resources/instances/create/main.tf new file mode 100644 index 0000000..ed6a8bd --- /dev/null +++ b/examples/resources/instances/create/main.tf @@ -0,0 +1,34 @@ +terraform { + required_providers { + intelcloud = { + source = "hashicorps/intelcloud" + } + } +} + +provider "intelcloud" { + region = "us-staging-1" +} + +data "intelcloud_machine_images" "image" { + most_recent = true + filters = [ + { + name = "name" + values = ["ubuntu-2204-jammy"] + } + ] +} + +resource "intelcloud_instance" "example" { + name = "tf-demo-instance" + spec = { + instance_type = "vm-spr-sml" + machine_image = data.intelcloud_machine_images.image.result.name + ssh_public_key_names = ["shrimac"] + } +} + +output "instance_order" { + value = intelcloud_instance.example +} diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..359fc9f --- /dev/null +++ b/go.mod @@ -0,0 +1,74 @@ +module terraform-provider-intelcloud + +go 1.21 + +require ( + github.com/hashicorp/terraform-plugin-docs v0.19.2 + github.com/hashicorp/terraform-plugin-framework v1.8.0 + github.com/hashicorp/terraform-plugin-go v0.22.2 + github.com/hashicorp/terraform-plugin-log v0.9.0 + 
github.com/sethvargo/go-retry v0.2.4 +) + +require ( + github.com/BurntSushi/toml v1.2.1 // indirect + github.com/Kunde21/markdownfmt/v3 v3.1.0 // indirect + github.com/Masterminds/goutils v1.1.1 // indirect + github.com/Masterminds/semver/v3 v3.2.0 // indirect + github.com/Masterminds/sprig/v3 v3.2.3 // indirect + github.com/ProtonMail/go-crypto v1.1.0-alpha.2 // indirect + github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect + github.com/armon/go-radix v1.0.0 // indirect + github.com/bgentry/speakeasy v0.1.0 // indirect + github.com/bmatcuk/doublestar/v4 v4.6.1 // indirect + github.com/cloudflare/circl v1.3.7 // indirect + github.com/fatih/color v1.16.0 // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/hashicorp/cli v1.1.6 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-checkpoint v0.5.0 // indirect + github.com/hashicorp/go-cleanhttp v0.5.2 // indirect + github.com/hashicorp/go-hclog v1.6.2 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/go-plugin v1.6.0 // indirect + github.com/hashicorp/go-uuid v1.0.3 // indirect + github.com/hashicorp/go-version v1.6.0 // indirect + github.com/hashicorp/hc-install v0.6.4 // indirect + github.com/hashicorp/terraform-exec v0.20.0 // indirect + github.com/hashicorp/terraform-json v0.21.0 // indirect + github.com/hashicorp/terraform-plugin-framework-jsontypes v0.1.0 // indirect + github.com/hashicorp/terraform-registry-address v0.2.3 // indirect + github.com/hashicorp/terraform-svchost v0.1.1 // indirect + github.com/hashicorp/yamux v0.1.1 // indirect + github.com/huandu/xstrings v1.3.3 // indirect + github.com/imdario/mergo v0.3.15 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mattn/go-runewidth v0.0.9 // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect + 
github.com/mitchellh/go-testing-interface v1.14.1 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/oklog/run v1.0.0 // indirect + github.com/posener/complete v1.2.3 // indirect + github.com/shopspring/decimal v1.3.1 // indirect + github.com/spf13/cast v1.5.0 // indirect + github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect + github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect + github.com/yuin/goldmark v1.7.1 // indirect + github.com/yuin/goldmark-meta v1.1.0 // indirect + github.com/zclconf/go-cty v1.14.4 // indirect + go.abhg.dev/goldmark/frontmatter v0.2.0 // indirect + golang.org/x/crypto v0.21.0 // indirect + golang.org/x/exp v0.0.0-20230809150735-7b3493d9a819 // indirect + golang.org/x/mod v0.16.0 // indirect + golang.org/x/net v0.23.0 // indirect + golang.org/x/sys v0.18.0 // indirect + golang.org/x/text v0.14.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de // indirect + google.golang.org/grpc v1.63.2 // indirect + google.golang.org/protobuf v1.33.0 // indirect + gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect + gopkg.in/yaml.v2 v2.3.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..f3be0ef --- /dev/null +++ b/go.sum @@ -0,0 +1,241 @@ +dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= +dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= +github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak= +github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/Kunde21/markdownfmt/v3 v3.1.0 h1:KiZu9LKs+wFFBQKhrZJrFZwtLnCCWJahL+S+E/3VnM0= +github.com/Kunde21/markdownfmt/v3 v3.1.0/go.mod h1:tPXN1RTyOzJwhfHoon9wUr4HGYmWgVxSQN6VBJDkrVc= +github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= +github.com/Masterminds/goutils v1.1.1/go.mod 
h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= +github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7YgDP83g= +github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= +github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= +github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/ProtonMail/go-crypto v1.1.0-alpha.2 h1:bkyFVUP+ROOARdgCiJzNQo2V2kiB97LyUpzH9P6Hrlg= +github.com/ProtonMail/go-crypto v1.1.0-alpha.2/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE= +github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= +github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= +github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bmatcuk/doublestar/v4 v4.6.1 h1:FH9SifrbvJhnlQpztAx++wlkk70QBf0iBWDwNy7PA4I= +github.com/bmatcuk/doublestar/v4 v4.6.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= +github.com/bufbuild/protocompile v0.4.0 h1:LbFKd2XowZvQ/kajzguUp2DC9UEIQhIq77fZZlaQsNA= +github.com/bufbuild/protocompile v0.4.0/go.mod h1:3v93+mbWn/v3xzN+31nwkJfrEpAUwp+BagBSZWx+TP8= +github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= +github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= +github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= 
+github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= +github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= +github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= +github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= +github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= +github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= +github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= +github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU= +github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow= +github.com/go-git/go-git/v5 v5.12.0 h1:7Md+ndsjrzZxbddRDZjF14qK+NN56sy6wkqaVrjZtys= +github.com/go-git/go-git/v5 v5.12.0/go.mod h1:FTM9VKtnI2m65hNI/TenDDDnUf2Q9FHnXYjuz9i5OEY= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/go-cmp v0.6.0 
h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/hashicorp/cli v1.1.6 h1:CMOV+/LJfL1tXCOKrgAX0uRKnzjj/mpmqNXloRSy2K8= +github.com/hashicorp/cli v1.1.6/go.mod h1:MPon5QYlgjjo0BSoAiN0ESeT5fRzDjVRp+uioJ0piz4= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-checkpoint v0.5.0 h1:MFYpPZCnQqQTE18jFwSII6eUQrD/oxMFp3mlgcqk5mU= +github.com/hashicorp/go-checkpoint v0.5.0/go.mod h1:7nfLNL10NsxqO4iWuW6tWW0HjZuDrwkBuEQsVcpCOgg= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-hclog v1.6.2 h1:NOtoftovWkDheyUM/8JW3QMiXyxJK3uHRK7wV04nD2I= +github.com/hashicorp/go-hclog v1.6.2/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-plugin v1.6.0 h1:wgd4KxHJTVGGqWBq4QPB1i5BZNEx9BR8+OFmHDmTk8A= +github.com/hashicorp/go-plugin v1.6.0/go.mod h1:lBS5MtSSBZk0SHc66KACcjjlU6WzEVP/8pwz68aMkCI= +github.com/hashicorp/go-uuid v1.0.0/go.mod 
h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= +github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= +github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/hc-install v0.6.4 h1:QLqlM56/+SIIGvGcfFiwMY3z5WGXT066suo/v9Km8e0= +github.com/hashicorp/hc-install v0.6.4/go.mod h1:05LWLy8TD842OtgcfBbOT0WMoInBMUSHjmDx10zuBIA= +github.com/hashicorp/terraform-exec v0.20.0 h1:DIZnPsqzPGuUnq6cH8jWcPunBfY+C+M8JyYF3vpnuEo= +github.com/hashicorp/terraform-exec v0.20.0/go.mod h1:ckKGkJWbsNqFKV1itgMnE0hY9IYf1HoiekpuN0eWoDw= +github.com/hashicorp/terraform-json v0.21.0 h1:9NQxbLNqPbEMze+S6+YluEdXgJmhQykRyRNd+zTI05U= +github.com/hashicorp/terraform-json v0.21.0/go.mod h1:qdeBs11ovMzo5puhrRibdD6d2Dq6TyE/28JiU4tIQxk= +github.com/hashicorp/terraform-plugin-docs v0.19.2 h1:YjdKa1vuqt9EnPYkkrv9HnGZz175HhSJ7Vsn8yZeWus= +github.com/hashicorp/terraform-plugin-docs v0.19.2/go.mod h1:gad2aP6uObFKhgNE8DR9nsEuEQnibp7il0jZYYOunWY= +github.com/hashicorp/terraform-plugin-framework v1.8.0 h1:P07qy8RKLcoBkCrY2RHJer5AEvJnDuXomBgou6fD8kI= +github.com/hashicorp/terraform-plugin-framework v1.8.0/go.mod h1:/CpTukO88PcL/62noU7cuyaSJ4Rsim+A/pa+3rUVufY= +github.com/hashicorp/terraform-plugin-framework-jsontypes v0.1.0 h1:b8vZYB/SkXJT4YPbT3trzE6oJ7dPyMy68+9dEDKsJjE= +github.com/hashicorp/terraform-plugin-framework-jsontypes v0.1.0/go.mod h1:tP9BC3icoXBz72evMS5UTFvi98CiKhPdXF6yLs1wS8A= +github.com/hashicorp/terraform-plugin-go v0.22.2 h1:5o8uveu6eZUf5J7xGPV0eY0TPXg3qpmwX9sce03Bxnc= +github.com/hashicorp/terraform-plugin-go v0.22.2/go.mod h1:drq8Snexp9HsbFZddvyLHN6LuWHHndSQg+gV+FPkcIM= +github.com/hashicorp/terraform-plugin-log v0.9.0 h1:i7hOA+vdAItN1/7UrfBqBwvYPQ9TFvymaRGZED3FCV0= +github.com/hashicorp/terraform-plugin-log v0.9.0/go.mod 
h1:rKL8egZQ/eXSyDqzLUuwUYLVdlYeamldAHSxjUFADow= +github.com/hashicorp/terraform-registry-address v0.2.3 h1:2TAiKJ1A3MAkZlH1YI/aTVcLZRu7JseiXNRHbOAyoTI= +github.com/hashicorp/terraform-registry-address v0.2.3/go.mod h1:lFHA76T8jfQteVfT7caREqguFrW3c4MFSPhZB7HHgUM= +github.com/hashicorp/terraform-svchost v0.1.1 h1:EZZimZ1GxdqFRinZ1tpJwVxxt49xc/S52uzrw4x0jKQ= +github.com/hashicorp/terraform-svchost v0.1.1/go.mod h1:mNsjQfZyf/Jhz35v6/0LWcv26+X7JPS+buii2c9/ctc= +github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE= +github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= +github.com/huandu/xstrings v1.3.3 h1:/Gcsuc1x8JVbJ9/rlye4xZnVAbEkGauT8lbebqcQws4= +github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM= +github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= +github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= +github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= +github.com/jhump/protoreflect v1.15.1 h1:HUMERORf3I3ZdX05WaQ6MIpd/NJ434hTp5YiKgfCL6c= +github.com/jhump/protoreflect v1.15.1/go.mod h1:jD/2GMKKE6OqX8qTjhADU1e6DShO+gavG9e0Q693nKo= +github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4= +github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= 
+github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= +github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= +github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/pjbgf/sha1cd v0.3.0 
h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= +github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.2.3 h1:NP0eAhjcjImqslEwo/1hq7gpajME0fTLTezBKDqfXqo= +github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8= +github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= +github.com/sethvargo/go-retry v0.2.4 h1:T+jHEQy/zKJf5s95UkguisicE0zuF9y7+/vgz08Ocec= +github.com/sethvargo/go-retry v0.2.4/go.mod h1:1afjQuvh7s4gflMObvjLPaWgluLLyhA1wmVZ6KLpICw= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= +github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/skeema/knownhosts v1.2.2 h1:Iug2P4fLmDw9f41PB6thxUkNUkJzB5i+1/exaj40L3A= +github.com/skeema/knownhosts v1.2.2/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= +github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify 
v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/vmihailenco/msgpack/v5 v5.4.1 h1:cQriyiUvjTwOHg8QZaPihLWeRAAVoCpE00IUPn0Bjt8= +github.com/vmihailenco/msgpack/v5 v5.4.1/go.mod h1:GaZTsDaehaPpQVyxrf5mtQlH+pc21PIudVV/E3rRQok= +github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= +github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= +github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= +github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/yuin/goldmark v1.7.1 h1:3bajkSilaCbjdKVsKdZjZCLBNPL9pYzrCakKaf4U49U= +github.com/yuin/goldmark v1.7.1/go.mod h1:uzxRWxtg69N339t3louHJ7+O03ezfj6PlliRlaOzY1E= +github.com/yuin/goldmark-meta v1.1.0 h1:pWw+JLHGZe8Rk0EGsMVssiNb/AaPMHfSRszZeUeiOUc= +github.com/yuin/goldmark-meta v1.1.0/go.mod h1:U4spWENafuA7Zyg+Lj5RqK/MF+ovMYtBvXi1lBb2VP0= +github.com/zclconf/go-cty v1.14.4 h1:uXXczd9QDGsgu0i/QFR/hzI5NYCHLf6NQw/atrbnhq8= +github.com/zclconf/go-cty v1.14.4/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= +go.abhg.dev/goldmark/frontmatter v0.2.0 h1:P8kPG0YkL12+aYk2yU3xHv4tcXzeVnN+gU0tJ5JnxRw= +go.abhg.dev/goldmark/frontmatter v0.2.0/go.mod h1:XqrEkZuM57djk7zrlRUB02x8I5J0px76YjkOzhB4YlU= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= 
+golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= +golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= +golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= +golang.org/x/exp v0.0.0-20230809150735-7b3493d9a819 h1:EDuYyU/MkFXllv9QF9819VlI9a4tzGuCbhG0ExK9o1U= +golang.org/x/exp v0.0.0-20230809150735-7b3493d9a819/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic= +golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ= +golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de h1:cZGRis4/ot9uVm639a+rHCUaG0JJHEsdyzSQTMX+suY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:H4O17MA/PE9BsGx3w+a+W2VOLLD1Qf7oJneAoU6WktY= +google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM= +google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/internal/models/common.go b/internal/models/common.go new file mode 100644 index 0000000..691c987 --- /dev/null +++ 
b/internal/models/common.go @@ -0,0 +1,10 @@ +package models + +import "github.com/hashicorp/terraform-plugin-framework/types" + +type ResourceMetadata struct { + ResourceId types.String `tfsdk:"resourceid"` + Cloudaccount types.String `tfsdk:"cloudaccount"` + Name types.String `tfsdk:"name"` + CreatedAt types.String `tfsdk:"createdat"` +} diff --git a/internal/models/images.go b/internal/models/images.go new file mode 100644 index 0000000..63e45b4 --- /dev/null +++ b/internal/models/images.go @@ -0,0 +1,10 @@ +package models + +import "github.com/hashicorp/terraform-plugin-framework/types" + +type MachineImage struct { + Name types.String `tfsdk:"name"` + Description types.String `tfsdk:"description"` + InstanceCategory []types.String `tfsdk:"instance_category"` + InstanceTypes []types.String `tfsdk:"instance_types"` +} diff --git a/internal/models/instance.go b/internal/models/instance.go new file mode 100644 index 0000000..74f4f00 --- /dev/null +++ b/internal/models/instance.go @@ -0,0 +1,79 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +// InstanceModel maps IDC Compute Instance schema data. 
+type InstanceModel struct { + ResourceId types.String `tfsdk:"resource_id"` + Cloudaccount types.String `tfsdk:"cloudaccount"` + Name types.String `tfsdk:"name"` + Spec InstanceSpec `tfsdk:"spec"` + Status types.String `tfsdk:"status"` + Interfaces types.List `tfsdk:"interfaces"` + SSHProxy types.Object `tfsdk:"ssh_proxy"` + AccessInfo types.Object `tfsdk:"access_info"` +} + +type InstanceSpec struct { + InstanceGroup types.String `tfsdk:"instance_group"` + InstanceType types.String `tfsdk:"instance_type"` + MachineImage types.String `tfsdk:"machine_image"` + SSHPublicKeyNames []types.String `tfsdk:"ssh_public_key_names"` + UserData types.String `tfsdk:"user_data"` +} + +type NetworkInterfaceSpec struct { + Name types.String `tfsdk:"name"` + VNet types.String `tfsdk:"vnet"` +} + +type NetworkInterface struct { + Addresses types.String `tfsdk:"address"` + DNSName types.String `tfsdk:"dns_name"` + Gateway types.String `tfsdk:"gateway"` + Name types.String `tfsdk:"name"` + PrefixLength types.Int64 `tfsdk:"prefix_length"` + Subnet types.String `tfsdk:"subnet"` + VNet types.String `tfsdk:"vnet"` +} + +var ProviderInterfaceAttributes = map[string]attr.Type{ + "address": types.StringType, + "dns_name": types.StringType, + "gateway": types.StringType, + "name": types.StringType, + "prefix_length": types.Int64Type, + "subnet": types.StringType, + "vnet": types.StringType, +} + +func (m NetworkInterface) AttributeTypes() map[string]attr.Type { + return map[string]attr.Type{} +} + +type InstanceAccessInfoModel struct { + Username types.String `tfsdk:"username"` +} + +func (m InstanceAccessInfoModel) AttributeTypes() map[string]attr.Type { + return map[string]attr.Type{ + "username": types.StringType, + } +} + +type SSHProxyModel struct { + ProxyAddress types.String `tfsdk:"address"` + ProxyPort types.Int64 `tfsdk:"port"` + ProxyUser types.String `tfsdk:"user"` +} + +func (m SSHProxyModel) AttributeTypes() map[string]attr.Type { + return map[string]attr.Type{ + "address": 
types.StringType, + "port": types.Int64Type, + "user": types.StringType, + } +} diff --git a/internal/models/instance_types.go b/internal/models/instance_types.go new file mode 100644 index 0000000..37be8f5 --- /dev/null +++ b/internal/models/instance_types.go @@ -0,0 +1,9 @@ +package models + +import "github.com/hashicorp/terraform-plugin-framework/types" + +type InstanceType struct { + Name types.String `tfsdk:"name"` + Description types.String `tfsdk:"description"` + InstanceCategory types.String `tfsdk:"instance_category"` +} diff --git a/internal/models/kubernetes.go b/internal/models/kubernetes.go new file mode 100644 index 0000000..f0b6921 --- /dev/null +++ b/internal/models/kubernetes.go @@ -0,0 +1,107 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type KubernetesClusterModel struct { + ClusterUUID types.String `tfsdk:"uuid"` + Cloudaccount types.String `tfsdk:"cloudaccount"` + Name types.String `tfsdk:"name"` + AvailabilityZone types.String `tfsdk:"availability_zone"` + K8sversion types.String `tfsdk:"kubernetes_version"` + ClusterStatus types.String `tfsdk:"cluster_status"` + Network types.Object `tfsdk:"network"` + NodeGroups types.List `tfsdk:"node_groups"` + SSHPublicKeyNames []types.String `tfsdk:"ssh_public_key_names"` + Storage types.List `tfsdk:"storages"` + LoadBalancer types.List `tfsdk:"load_balancers"` + UpgardeAvailable types.Bool `tfsdk:"upgrade_available"` + UpgradableVersions []types.String `tfsdk:"upgrade_k8s_versions_available"` +} + +type IKSClusterModel struct { + ClusterUUID types.String `tfsdk:"uuid"` + Cloudaccount types.String `tfsdk:"cloudaccount"` + Name types.String `tfsdk:"name"` + AvailabilityZone types.String `tfsdk:"availability_zone"` + K8sversion types.String `tfsdk:"kubernetes_version"` + ClusterStatus types.String `tfsdk:"cluster_status"` + Network types.Object `tfsdk:"network"` + UpgardeAvailable types.Bool 
`tfsdk:"upgrade_available"` + + // UpgradableVersions []types.String `tfsdk:"upgrade_k8s_versions_available"` +} + +var UpgradableVersionAttributes = []types.String{} + +type IKSStorage struct { + Size types.Int64 `tfsdk:"size_in_gb"` + State types.String `tfsdk:"state"` + StorageProvider types.String `tfsdk:"storage_provider"` +} + +var IKStorageAttributes = map[string]attr.Type{ + "size_in_gb": types.Int64Type, // must match IKSStorage.Size (types.Int64) and the Int64Attribute schema + "state": types.StringType, + "storage_provider": types.StringType, +} + +type ClusterNetwork struct { + ClusterCIDR types.String `tfsdk:"cluster_cidr"` + ClusterDNS types.String `tfsdk:"cluster_dns"` + EnableLB types.Bool `tfsdk:"enable_lb"` + ServiceCIDR types.String `tfsdk:"service_cidr"` +} + +func (m ClusterNetwork) AttributeTypes() map[string]attr.Type { + return map[string]attr.Type{ + "cluster_cidr": types.StringType, + "cluster_dns": types.StringType, + "enable_lb": types.BoolType, + "service_cidr": types.StringType, + } +} + +type NodeGroup struct { + ID types.String `tfsdk:"id"` + Count types.Int64 `tfsdk:"ng_count"` + Name types.String `tfsdk:"name"` + InstanceType types.String `tfsdk:"instance_type"` + IMIId types.String `tfsdk:"imiid"` + State types.String `tfsdk:"state"` + UserDataURL types.String `tfsdk:"userdata_url"` + SSHPublicKeyNames []types.String `tfsdk:"ssh_public_key_names"` + Interfaces []NetworkInterfaceSpec `tfsdk:"interfaces"` +} + +var NodeGroupAttributes = map[string]attr.Type{ + "id": types.StringType, + "ng_count": types.Int64Type, + "name": types.StringType, + "instance_type": types.StringType, + "imiid": types.StringType, + "state": types.StringType, + "userdata_url": types.StringType, +} + +type IKSLoadBalancer struct { + ID types.String `tfsdk:"id"` + Name types.String `tfsdk:"name"` + VipState types.String `tfsdk:"vip_state"` + VipIp types.String `tfsdk:"vip_ip"` + Port types.Int64 `tfsdk:"port"` + PoolPort types.Int64 `tfsdk:"pool_port"` + VipType types.String `tfsdk:"vip_type"` +} + +var IKSLoadLalancerAttributes = 
map[string]attr.Type{ + "id": types.StringType, + "name": types.StringType, + "vip_state": types.StringType, + "vip_ip": types.StringType, + "port": types.Int64Type, + "pool_port": types.Int64Type, + "vip_type": types.StringType, +} diff --git a/internal/models/storages.go b/internal/models/storages.go new file mode 100644 index 0000000..6600c9a --- /dev/null +++ b/internal/models/storages.go @@ -0,0 +1,88 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type FilesystemCreateRequest struct { + Name types.String `tfsdk:"name"` + Description types.String `tfsdk:"description"` + Spec FilesystemSpec `tfsdk:"spec"` +} + +type FilesystemModel struct { + ResourceId types.String `tfsdk:"resource_id"` + Cloudaccount types.String `tfsdk:"cloudaccount"` + Name types.String `tfsdk:"name"` + Description types.String `tfsdk:"description"` + AvailabilityZone types.String `tfsdk:"availability_zone"` + Spec FilesystemSpec `tfsdk:"spec"` + Status types.String `tfsdk:"status"` + ClusterInfo types.Object `tfsdk:"cluster_info"` + AccessInfo types.Object `tfsdk:"access_info"` +} + +type FilesystemSpec struct { + Size types.Int64 `tfsdk:"size_in_tb"` + AccessMode types.String `tfsdk:"access_mode"` + Encrypted types.Bool `tfsdk:"encrypted"` + FilesystemType types.String `tfsdk:"filesystem_type"` + StorageClass types.String `tfsdk:"storage_class"` +} + +type ObjectStoreSpec struct { + Versioned types.Bool `tfsdk:"versioned"` +} + +type FilesystemClusteModel struct { + ClusterAddress types.String `tfsdk:"cluster_address"` + ClusterVersion types.String `tfsdk:"cluster_version"` +} + +type FilesystemAccessModel struct { + Namespace types.String `tfsdk:"namespace"` + Filesystem types.String `tfsdk:"filesystem_name"` + Username types.String `tfsdk:"username"` + Password types.String `tfsdk:"password"` +} + +func (m FilesystemAccessModel) AttributeTypes() map[string]attr.Type { + return 
map[string]attr.Type{ + "namespace": types.StringType, + "filesystem_name": types.StringType, + "username": types.StringType, + "password": types.StringType, + } +} + +func (m FilesystemClusteModel) AttributeTypes() map[string]attr.Type { + return map[string]attr.Type{ + "cluster_address": types.StringType, + "cluster_version": types.StringType, + } +} + +type NetworkSecurityGroup struct { + Gateway types.String `tfsdk:"gateway"` + PrefixLength types.Int64 `tfsdk:"prefix_length"` + Subnet types.String `tfsdk:"subnet"` +} + +var NetworkSecurityGroupAttributes = map[string]attr.Type{ + "gateway": types.StringType, + "prefix_length": types.Int64Type, + "subnet": types.StringType, +} + +type ObjectUserAccessModel struct { + AccessKey types.String `tfsdk:"access_key"` + SecretKey types.String `tfsdk:"secret_key"` +} + +func (m ObjectUserAccessModel) AttributeTypes() map[string]attr.Type { + return map[string]attr.Type{ + "access_key": types.StringType, + "secret_key": types.StringType, + } +} diff --git a/internal/provider/filesystem_resource.go b/internal/provider/filesystem_resource.go new file mode 100644 index 0000000..1f7c3fd --- /dev/null +++ b/internal/provider/filesystem_resource.go @@ -0,0 +1,347 @@ +package provider + +import ( + "context" + "fmt" + "strconv" + "strings" + + "terraform-provider-intelcloud/internal/models" + "terraform-provider-intelcloud/pkg/itacservices" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/booldefault" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringdefault" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" +) + +// Ensure the implementation satisfies the expected interfaces. 
+var ( + _ resource.Resource = &filesystemResource{} + _ resource.ResourceWithConfigure = &filesystemResource{} + _ resource.ResourceWithImportState = &filesystemResource{} +) + +// filesystemModel maps the resource schema data. +type filesystemResourceModel struct { + ID types.String `tfsdk:"id"` + Cloudaccount types.String `tfsdk:"cloudaccount"` + Name types.String `tfsdk:"name"` + Description types.String `tfsdk:"description"` + AvailabilityZone types.String `tfsdk:"availability_zone"` + Spec *models.FilesystemSpec `tfsdk:"spec"` + Status types.String `tfsdk:"status"` + ClusterInfo types.Object `tfsdk:"cluster_info"` + AccessInfo types.Object `tfsdk:"access_info"` +} + +// NewFilesystemResource is a helper function to simplify the provider implementation. +func NewFilesystemResource() resource.Resource { + return &filesystemResource{} +} + +// orderResource is the resource implementation. +type filesystemResource struct { + client *itacservices.IDCServicesClient +} + +// Configure adds the provider configured client to the resource. +func (r *filesystemResource) Configure(_ context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + + client, ok := req.ProviderData.(*itacservices.IDCServicesClient) + + if !ok { + resp.Diagnostics.AddError( + "Unexpected Data Source Configure Type", + fmt.Sprintf("Expected *itacservices.IDCServicesClient, got: %T. Please report this issue to the provider developers.", req.ProviderData), + ) + + return + } + + r.client = client +} + +// Metadata returns the resource type name. +func (r *filesystemResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_filesystem" +} + +// Schema defines the schema for the resource. 
+func (r *filesystemResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Required: true, + }, + "cloudaccount": schema.StringAttribute{ + Computed: true, + }, + "description": schema.StringAttribute{ + Optional: true, + }, + "availability_zone": schema.StringAttribute{ + Computed: true, + }, + "spec": schema.SingleNestedAttribute{ + Required: true, + Attributes: map[string]schema.Attribute{ + "size_in_tb": schema.Int64Attribute{ + Required: true, + }, + "access_mode": schema.StringAttribute{ + Computed: true, + Default: stringdefault.StaticString("ReadWrite"), + }, + "encrypted": schema.BoolAttribute{ + Computed: true, + Default: booldefault.StaticBool(true), + }, + "storage_class": schema.StringAttribute{ + Computed: true, + }, + "filesystem_type": schema.StringAttribute{ + Computed: true, + }, + }, + }, + "cluster_info": schema.ObjectAttribute{ + AttributeTypes: map[string]attr.Type{ + "cluster_address": types.StringType, + "cluster_version": types.StringType, + }, + Computed: true, + }, + "access_info": schema.ObjectAttribute{ + AttributeTypes: map[string]attr.Type{ + "namespace": types.StringType, + "filesystem_name": types.StringType, + "username": types.StringType, + "password": types.StringType, + }, + Computed: true, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + }, + } + +} + +// Create creates the resource and sets the initial Terraform state. +func (r *filesystemResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + // Retrieve values from plan + var plan filesystemResourceModel + + diags := req.Plan.Get(ctx, &plan) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + inArg := itacservices.FilesystemCreateRequest{ + Metadata: struct { + Name string "json:\"name\"" + Description string "json:\"description\"" + }{ + Name: plan.Name.ValueString(), + Description: plan.Description.ValueString(), + }, + Spec: struct { + Request struct { + Size string "json:\"storage\"" + } "json:\"request\"" + StorageClass string "json:\"storageClass\"" + AccessMode string "json:\"accessModes\"" + FilesystemType string "json:\"filesystemType\"" + InstanceType string "json:\"instanceType\"" + Encrypted bool "json:\"Encrypted\"" + AvailabilityZone string "json:\"availabilityZone\"" + }{ + Request: struct { + Size string "json:\"storage\"" + }{ + Size: fmt.Sprintf("%dTB", plan.Spec.Size.ValueInt64()), + }, + FilesystemType: "ComputeGeneral", + InstanceType: "storage-file", // hard-coded for now + AvailabilityZone: fmt.Sprintf("%sa", *r.client.Region), + StorageClass: "GeneralPurpose", + AccessMode: plan.Spec.AccessMode.ValueString(), + Encrypted: plan.Spec.Encrypted.ValueBool(), + }, + } + tflog.Info(ctx, "making a call to IDC Service for create filesystem") + fsResp, err := r.client.CreateFilesystem(ctx, &inArg) + if err != nil { + resp.Diagnostics.AddError( + "Error creating order", + "Could not create order, unexpected error: "+err.Error(), + ) + return + } + + // Map response body to schema and populate Computed attribute values + plan.AvailabilityZone = types.StringValue(fsResp.Spec.AvailabilityZone) + plan.Cloudaccount = types.StringValue(fsResp.Metadata.Cloudaccount) + plan.ID = types.StringValue(fsResp.Metadata.ResourceId) + plan.Status = types.StringValue(mapFilesystemStatus(fsResp.Status.Phase)) + + clusterInfoMap := models.FilesystemClusteModel{ + ClusterAddress: types.StringValue(fsResp.Status.Mount.ClusterAddr), + ClusterVersion: types.StringValue(fsResp.Status.Mount.ClusterVersion), + } + + accessInfoMap := models.FilesystemAccessModel{ + Namespace: 
types.StringValue(fsResp.Status.Mount.Namespace), + Filesystem: types.StringValue(fsResp.Status.Mount.FilesystemName), + Username: types.StringValue(fsResp.Status.Mount.UserName), + Password: types.StringValue(fsResp.Status.Mount.Password), + } + + plan.ClusterInfo, diags = types.ObjectValueFrom(ctx, clusterInfoMap.AttributeTypes(), clusterInfoMap) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + plan.AccessInfo, diags = types.ObjectValueFrom(ctx, accessInfoMap.AttributeTypes(), accessInfoMap) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + // Set state to fully populated data + diags = resp.State.Set(ctx, plan) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } +} + +// Read refreshes the Terraform state with the latest data. +func (r *filesystemResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + // Get current state + var orig filesystemResourceModel + diags := req.State.Get(ctx, &orig) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + // Get refreshed order value from IDC Service + filesystem, err := r.client.GetFilesystemByResourceId(ctx, orig.ID.ValueString()) + if err != nil { + resp.Diagnostics.AddError( + "Error Reading IDC Filesystem resource", + "Could not read IDC Filesystem resource ID "+orig.ID.ValueString()+": "+err.Error(), + ) + return + } + + state := &filesystemResourceModel{} + sizeStr := strings.Split(filesystem.Spec.Request.Size, "GB")[0] // NOTE(review): Create sends "%dTB" but this strips a "GB" suffix — confirm the API reports size in GB + size, _ := strconv.ParseInt(sizeStr, 10, 64) // NOTE(review): parse error is discarded; on a unit mismatch size silently becomes 0 + + state.ID = types.StringValue(filesystem.Metadata.ResourceId) + state.Cloudaccount = types.StringValue(filesystem.Metadata.Cloudaccount) + state.Name = types.StringValue(filesystem.Metadata.Name) + state.Description = types.StringValue(filesystem.Metadata.Description) + state.AvailabilityZone = types.StringValue(filesystem.Spec.AvailabilityZone) + state.Spec = &models.FilesystemSpec{ + Size: types.Int64Value(size), + AccessMode: types.StringValue(filesystem.Spec.AccessMode), + Encrypted: types.BoolValue(filesystem.Spec.Encrypted), + } + + state.Status = types.StringValue(mapFilesystemStatus(filesystem.Status.Phase)) + + clusterInfoMap := models.FilesystemClusteModel{ + ClusterAddress: types.StringValue(filesystem.Status.Mount.ClusterAddr), + ClusterVersion: types.StringValue(filesystem.Status.Mount.ClusterVersion), + } + + accessInfoMap := models.FilesystemAccessModel{ + Namespace: types.StringValue(filesystem.Status.Mount.Namespace), + Filesystem: types.StringValue(filesystem.Status.Mount.FilesystemName), + Username: types.StringValue(filesystem.Status.Mount.UserName), + Password: types.StringValue(filesystem.Status.Mount.Password), + } + + state.ClusterInfo, diags = types.ObjectValueFrom(ctx, clusterInfoMap.AttributeTypes(), clusterInfoMap) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + state.AccessInfo, diags = types.ObjectValueFrom(ctx, accessInfoMap.AttributeTypes(), accessInfoMap) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + // Set refreshed state + diags = resp.State.Set(ctx, state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } +} + +// Update updates the resource and sets the updated Terraform state on success. +func (r *filesystemResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { +} + +func (r *filesystemResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + // Retrieve import ID and save to id attribute + resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp) +} + +// Delete deletes the resource and removes the Terraform state on success. +func (r *filesystemResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + // Get current state + var state filesystemResourceModel + diags := req.State.Get(ctx, &state) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + // Delete the order from IDC Services + err := r.client.DeleteFilesystemByResourceId(ctx, state.ID.ValueString()) + if err != nil { + resp.Diagnostics.AddError( + "Error Deleting IDC Filesystem resource", + "Could not delete IDC Filesystem resource ID "+state.ID.ValueString()+": "+err.Error(), + ) + return + } +} + +func mapFilesystemStatus(fsStatus string) string { + switch fsStatus { + case "FSReady": + return "ready" + case "FSFailed": + return "failed" + case "FSProvisioning": + return "provisioning" + case "FSDeleting": + return "deleting" + case "FSDeleted": + return "deleted" + default: + return "unspecified" + } +} diff --git a/internal/provider/filesystems_data_source.go b/internal/provider/filesystems_data_source.go new file mode 100644 index 0000000..099b2ef --- /dev/null +++ b/internal/provider/filesystems_data_source.go @@ -0,0 +1,197 @@ +package provider + +import ( + "context" + "fmt" + "strconv" + "strings" + + "terraform-provider-intelcloud/internal/models" + "terraform-provider-intelcloud/pkg/itacservices" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func NewFilesystemsDataSource() datasource.DataSource { + return &filesystemsDataSource{} +} + +type filesystemsDataSource struct { + client *itacservices.IDCServicesClient +} + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &filesystemsDataSource{} + _ datasource.DataSourceWithConfigure = &filesystemsDataSource{} +) + +// storagesDataSourceModel maps the data source schema data. +type filesystemsDataSourceModel struct { + Filesystems []models.FilesystemModel `tfsdk:"filesystems"` +} + +// Configure adds the provider configured client to the data source. 
+func (d *filesystemsDataSource) Configure(_ context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + + client, ok := req.ProviderData.(*itacservices.IDCServicesClient) + if !ok { + resp.Diagnostics.AddError( + "Unexpected Data Source Configure Type", + fmt.Sprintf("Expected *itacservices.IDCServicesClient, got: %T. Please report this issue to the provider developers.", req.ProviderData), + ) + + return + } + + d.client = client +} + +func (d *filesystemsDataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_filesystems" +} + +func (d *filesystemsDataSource) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "filesystems": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "resource_id": schema.StringAttribute{ + Computed: true, + }, + "cloudaccount": schema.StringAttribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + "description": schema.StringAttribute{ + Optional: true, + }, + "availability_zone": schema.StringAttribute{ + Computed: true, + }, + "spec": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "size_in_tb": schema.Int64Attribute{ + Computed: true, + }, + "access_mode": schema.StringAttribute{ + Computed: true, + }, + "filesystem_type": schema.StringAttribute{ + Computed: true, + }, + "storage_class": schema.StringAttribute{ + Computed: true, + }, + "encrypted": schema.BoolAttribute{ + Computed: true, + }, + }, + }, + "cluster_info": schema.ObjectAttribute{ + AttributeTypes: map[string]attr.Type{ + "cluster_address": types.StringType, + "cluster_version": types.StringType, + }, + Computed: true, + }, + 
"access_info": schema.ObjectAttribute{ + AttributeTypes: map[string]attr.Type{ + "namespace": types.StringType, + "filesystem_name": types.StringType, + "username": types.StringType, + "password": types.StringType, + }, + Computed: true, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + }, + } +} + +func (d *filesystemsDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + + var state filesystemsDataSourceModel + state.Filesystems = []models.FilesystemModel{} + + diags := resp.Diagnostics + fsList, err := d.client.GetFilesystems(ctx) + if err != nil { + resp.Diagnostics.AddError( + "Unable to Read IDC Filesystems", + err.Error(), + ) + return + } + + for _, fs := range fsList.FilesystemList { + sizeStr := strings.Split(fs.Spec.Request.Size, "GB")[0] + size, _ := strconv.ParseInt(sizeStr, 10, 64) + fsModel := models.FilesystemModel{ + Cloudaccount: types.StringValue(fs.Metadata.Cloudaccount), + Name: types.StringValue(fs.Metadata.Name), + Description: types.StringValue(fs.Metadata.Description), + ResourceId: types.StringValue(fs.Metadata.ResourceId), + AvailabilityZone: types.StringValue(fs.Spec.AvailabilityZone), + Spec: models.FilesystemSpec{ + Size: types.Int64Value(size), + AccessMode: types.StringValue(fs.Spec.AccessMode), + FilesystemType: types.StringValue(fs.Spec.FilesystemType), + StorageClass: types.StringValue(fs.Spec.StorageClass), + Encrypted: types.BoolValue(fs.Spec.Encrypted), + }, + + Status: types.StringValue(fs.Status.Phase), + } + fsModel.Status = types.StringValue(mapFilesystemStatus(fs.Status.Phase)) + + clusterInfoMap := models.FilesystemClusteModel{ + ClusterAddress: types.StringValue(fs.Status.Mount.ClusterAddr), + ClusterVersion: types.StringValue(fs.Status.Mount.ClusterVersion), + } + + accessInfoMap := models.FilesystemAccessModel{ + Namespace: types.StringValue(fs.Status.Mount.Namespace), + Filesystem: types.StringValue(fs.Status.Mount.FilesystemName), + 
Username: types.StringValue(fs.Status.Mount.UserName), + Password: types.StringValue(fs.Status.Mount.Password), + } + + fsModel.ClusterInfo, diags = types.ObjectValueFrom(ctx, clusterInfoMap.AttributeTypes(), clusterInfoMap) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + fsModel.AccessInfo, diags = types.ObjectValueFrom(ctx, accessInfoMap.AttributeTypes(), accessInfoMap) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + state.Filesystems = append(state.Filesystems, fsModel) + } + + // Set state + diags = resp.State.Set(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } +} diff --git a/internal/provider/iks_cluster_resource.go b/internal/provider/iks_cluster_resource.go new file mode 100644 index 0000000..dbc48bf --- /dev/null +++ b/internal/provider/iks_cluster_resource.go @@ -0,0 +1,325 @@ +package provider + +import ( + "context" + "fmt" + "strconv" + "terraform-provider-intelcloud/internal/models" + "terraform-provider-intelcloud/pkg/itacservices" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ resource.Resource = &iksClusterResource{} + _ resource.ResourceWithConfigure = &iksClusterResource{} +) + +// orderKubernetesModel maps the resource schema data. 
+type iksClusterResourceModel struct { + ID types.String `tfsdk:"id"` + Cloudaccount types.String `tfsdk:"cloudaccount"` + Name types.String `tfsdk:"name"` + AvailabilityZone types.String `tfsdk:"availability_zone"` + K8sversion types.String `tfsdk:"kubernetes_version"` + ClusterStatus types.String `tfsdk:"cluster_status"` + Network types.Object `tfsdk:"network"` + UpgardeAvailable types.Bool `tfsdk:"upgrade_available"` + // UpgradableVersions []types.String `tfsdk:"upgrade_k8s_versions_available"` + + Storage *models.IKSStorage `tfsdk:"storage"` +} + +// NewIKSClusterResource is a helper function to simplify the provider implementation. +func NewIKSClusterResource() resource.Resource { + return &iksClusterResource{} +} + +// orderKubernetes is the resource implementation. +type iksClusterResource struct { + client *itacservices.IDCServicesClient +} + +// Configure adds the provider configured client to the resource. +func (r *iksClusterResource) Configure(_ context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + + client, ok := req.ProviderData.(*itacservices.IDCServicesClient) + + if !ok { + resp.Diagnostics.AddError( + "Unexpected Data Source Configure Type", + fmt.Sprintf("Expected *itacservices.IDCServicesClient, got: %T. Please report this issue to the provider developers.", req.ProviderData), + ) + + return + } + + r.client = client +} + +// Metadata returns the resource type name. +func (r *iksClusterResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_iks_cluster" +} + +// Schema defines the schema for the resource. 
+func (r *iksClusterResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "name": schema.StringAttribute{ + Required: true, + }, + "id": schema.StringAttribute{ + Computed: true, + }, + "cloudaccount": schema.StringAttribute{ + Computed: true, + }, + "availability_zone": schema.StringAttribute{ + Optional: true, + }, + "kubernetes_version": schema.StringAttribute{ + Required: true, + }, + "cluster_status": schema.StringAttribute{ + Computed: true, + }, + "network": schema.ObjectAttribute{ + AttributeTypes: map[string]attr.Type{ + "cluster_cidr": types.StringType, + "service_cidr": types.StringType, + "cluster_dns": types.StringType, + "enable_lb": types.BoolType, + }, + Computed: true, + }, + "storage": schema.SingleNestedAttribute{ + Required: true, + Attributes: map[string]schema.Attribute{ + "size_in_gb": schema.Int64Attribute{ + Required: true, + }, + "state": schema.StringAttribute{ + Computed: true, + }, + "storage_provider": schema.StringAttribute{ + Computed: true, + }, + }, + }, + "upgrade_available": schema.BoolAttribute{ + Computed: true, + }, + // "upgrade_k8s_versions_available": schema.ListAttribute{ + // ElementType: types.StringType, + // Computed: true, + // }, + }, + } +} + +// Create creates the resource and sets the initial Terraform state. +func (r *iksClusterResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + // Retrieve values from plan + var plan iksClusterResourceModel + + diags := req.Plan.Get(ctx, &plan) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + inArg := itacservices.IKSCreateRequest{ + Name: plan.Name.ValueString(), + K8sVersion: plan.K8sversion.ValueString(), + InstanceType: "iks-cluster", + RuntimeName: "Containerd", + } + iksClusterResp, cloudaccount, err := r.client.CreateIKSCluster(ctx, &inArg, false) + if err != nil { + resp.Diagnostics.AddError( + "Error creating order", + "Could not create order, unexpected error: "+err.Error(), + ) + return + } + // iksClusterResp := idcservices.IKSCluster{ + // ResourceId: "cl-lc2ze6pu4i", + // } + // var cloudaccount *string + // Map response body to schema and populate Computed attribute values + plan.ID = types.StringValue(iksClusterResp.ResourceId) + plan.ClusterStatus = types.StringValue(iksClusterResp.ClusterState) + if cloudaccount != nil { + plan.Cloudaccount = types.StringValue(*cloudaccount) + } else { + plan.Cloudaccount = types.StringNull() + } + + network := models.ClusterNetwork{ + ClusterCIDR: types.StringValue(iksClusterResp.Network.ClusterCIDR), + ClusterDNS: types.StringValue(iksClusterResp.Network.ClusterDNS), + EnableLB: types.BoolValue(iksClusterResp.Network.EnableLB), + ServiceCIDR: types.StringValue(iksClusterResp.Network.ServcieCIDR), + } + plan.Network, diags = types.ObjectValueFrom(ctx, network.AttributeTypes(), network) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + if !plan.Storage.Size.IsNull() { + inArg := itacservices.IKSStorageCreateRequest{ + Enable: true, + Size: fmt.Sprintf("%sGB", strconv.FormatInt(plan.Storage.Size.ValueInt64(), 10)), + } + + storageResp, _, err := r.client.CreateIKSStorage(ctx, &inArg, plan.ID.ValueString()) + if err != nil { + resp.Diagnostics.AddError( + "Error creating iks file storage", + "Could not create iks file storage, unexpected error: "+err.Error(), + ) + return + } + + sizeNum, _ := strconv.ParseInt(storageResp.Size, 10, 64) + currV := models.IKSStorage{ + Size: types.Int64Value(sizeNum), + State: types.StringValue(storageResp.State), + StorageProvider: types.StringValue(storageResp.Provider), + } + plan.Storage = &currV + } + + plan.UpgardeAvailable = types.BoolValue(iksClusterResp.UpgradeAvailable) + + // for _, k := range iksClusterResp.UpgradableK8sVersions { + // plan.KubernetesCluster.UpgradableVersions = append(plan.KubernetesCluster.UpgradableVersions, types.StringValue(k)) + // } + + // Set state to fully populated data + diags = resp.State.Set(ctx, plan) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } +} + +// Read refreshes the Terraform state with the latest data. +func (r *iksClusterResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + // Get current state + var state iksClusterResourceModel + diags := req.State.Get(ctx, &state) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + iksClusterResp, cloudaccount, err := r.client.GetIKSClusterByClusterUUID(ctx, state.ID.ValueString()) + if err != nil { + resp.Diagnostics.AddError( + "Error reading state", + "Could not read state, unexpected error: "+err.Error(), + ) + return + } + + // Map response body to schema and populate Computed attribute values + state.ID = types.StringValue(iksClusterResp.ResourceId) + state.ClusterStatus = types.StringValue(iksClusterResp.ClusterState) + if cloudaccount != nil { + state.Cloudaccount = types.StringValue(*cloudaccount) + } else { + state.Cloudaccount = types.StringNull() + } + + network := models.ClusterNetwork{ + ClusterCIDR: types.StringValue(iksClusterResp.Network.ClusterCIDR), + ClusterDNS: types.StringValue(iksClusterResp.Network.ClusterDNS), + EnableLB: types.BoolValue(iksClusterResp.Network.EnableLB), + ServiceCIDR: types.StringValue(iksClusterResp.Network.ServcieCIDR), + } + state.Network, diags = types.ObjectValueFrom(ctx, network.AttributeTypes(), network) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + // volumes := []models.IKSStorage{} + // for _, v := range iksClusterResp.Storages { + // currV := models.IKSStorage{ + // Size: types.StringValue(v.Size), + // State: types.StringValue(v.State), + // StorageProvider: types.StringValue(v.Provider), + // } + // volumes = append(volumes, currV) + // } + // state.KubernetesCluster.Storage, diags = types.ListValueFrom(ctx, types.ObjectType{}.WithAttributeTypes(models.IKStorageAttributes), volumes) + // resp.Diagnostics.Append(diags...) 
+ // if resp.Diagnostics.HasError() { + // return + // } + + // ngs := []models.NodeGroup{} + // for _, n := range iksClusterResp.NodeGroups { + // ng := models.NodeGroup{ + // ID: types.StringValue(n.ID), + // Name: types.StringValue(n.Name), + // Count: types.Int64Value(n.Count), + // InstanceType: types.StringValue(n.InstanceType), + // IMIId: types.StringValue(n.IMIID), + // State: types.StringValue(n.State), + // UserDataURL: types.StringValue(n.UserDataURL), + // } + // for _, sshk := range n.SSHKeyNames { + // state.KubernetesCluster.SSHPublicKeyNames = append(state.KubernetesCluster.SSHPublicKeyNames, types.StringValue(sshk.Name)) + // } + // ngs = append(ngs, ng) + // } + // state.KubernetesCluster.NodeGroups, diags = types.ListValueFrom(ctx, types.ObjectType{}.WithAttributeTypes(models.NodeGroupAttributes), ngs) + // resp.Diagnostics.Append(diags...) + // if resp.Diagnostics.HasError() { + // return + // } + state.UpgardeAvailable = types.BoolValue(iksClusterResp.UpgradeAvailable) + + // Set refreshed state + diags = resp.State.Set(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } +} + +// Update updates the resource and sets the updated Terraform state on success. +func (r *iksClusterResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { +} + +// Delete deletes the resource and removes the Terraform state on success. +func (r *iksClusterResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + // Get current state + var state iksClusterResourceModel + diags := req.State.Get(ctx, &state) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + // Delete the order from IDC Services + err := r.client.DeleteIKSCluster(ctx, state.ID.ValueString()) + if err != nil { + resp.Diagnostics.AddError( + "Error Deleting IDC IKS Cluster resource", + "Could not delete IDC IKS Cluster ID "+state.ID.String()+": "+err.Error(), + ) + return + } +} diff --git a/internal/provider/iks_data_source.go b/internal/provider/iks_data_source.go new file mode 100644 index 0000000..65dee9d --- /dev/null +++ b/internal/provider/iks_data_source.go @@ -0,0 +1,290 @@ +package provider + +import ( + "context" + "fmt" + "strconv" + "terraform-provider-intelcloud/internal/models" + "terraform-provider-intelcloud/pkg/itacservices" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func NewKubernetesDataSource() datasource.DataSource { + return &kubernetesDataSource{} +} + +type kubernetesDataSource struct { + client *itacservices.IDCServicesClient +} + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &kubernetesDataSource{} + _ datasource.DataSourceWithConfigure = &kubernetesDataSource{} +) + +// storagesDataSourceModel maps the data source schema data. +type kubernetesDataSourceModel struct { + Clusters []models.KubernetesClusterModel `tfsdk:"clusters"` +} + +// Configure adds the provider configured client to the data source. +func (d *kubernetesDataSource) Configure(_ context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + + client, ok := req.ProviderData.(*itacservices.IDCServicesClient) + if !ok { + resp.Diagnostics.AddError( + "Unexpected Data Source Configure Type", + fmt.Sprintf("Expected *itacservices.IDCServicesClient, got: %T. 
Please report this issue to the provider developers.", req.ProviderData), + ) + + return + } + + d.client = client +} + +func (d *kubernetesDataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_iks_clusters" +} + +func (d *kubernetesDataSource) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "clusters": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "name": schema.StringAttribute{ + Required: true, + }, + "resource_id": schema.StringAttribute{ + Computed: true, + }, + "cloudaccount": schema.StringAttribute{ + Computed: true, + }, + "availability_zone": schema.StringAttribute{ + Optional: true, + }, + "kubernetes_version": schema.StringAttribute{ + Computed: true, + }, + "cluster_status": schema.StringAttribute{ + Computed: true, + }, + "load_balancers": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "name": schema.StringAttribute{ + Computed: true, + }, + "vip_state": schema.StringAttribute{ + Computed: true, + }, + "vip_ip": schema.StringAttribute{ + Computed: true, + }, + "port": schema.Int64Attribute{ + Computed: true, + }, + "pool_port": schema.Int64Attribute{ + Computed: true, + }, + "vip_type": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + "network": schema.ObjectAttribute{ + AttributeTypes: map[string]attr.Type{ + "cluster_cidr": types.StringType, + "service_cidr": types.StringType, + "cluster_dns": types.StringType, + "enable_lb": types.BoolType, + }, + Computed: true, + }, + "node_groups": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "id": 
schema.StringAttribute{ + Computed: true, + }, + "count": schema.Int64Attribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + "instance_type": schema.StringAttribute{ + Computed: true, + }, + "imiid": schema.StringAttribute{ + Computed: true, + }, + "state": schema.StringAttribute{ + Computed: true, + }, + "userdata_url": schema.StringAttribute{ + Optional: true, + }, + }, + }, + }, + "ssh_public_key_names": schema.ListAttribute{ + ElementType: types.StringType, + Required: true, + }, + "storages": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "size": schema.StringAttribute{ + Computed: true, + }, + "state": schema.StringAttribute{ + Computed: true, + }, + "storage_provider": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + "upgrade_available": schema.BoolAttribute{ + Computed: true, + }, + "upgrade_k8s_versions_available": schema.ListAttribute{ + ElementType: types.StringType, + Required: true, + }, + }, + }, + }, + }, + } +} +func (d *kubernetesDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + + var state kubernetesDataSourceModel + state.Clusters = []models.KubernetesClusterModel{} + + diags := resp.Diagnostics + iksClusters, cloudaccount, err := d.client.GetKubernetesClusters(ctx) + if err != nil { + resp.Diagnostics.AddError( + "Unable to Read IDC Kubernetes Clusters", + err.Error(), + ) + return + } + + for _, cl := range iksClusters.Clusters { + iksModel := models.KubernetesClusterModel{ + ClusterUUID: types.StringValue(cl.ResourceId), + Name: types.StringValue(cl.Name), + AvailabilityZone: types.StringNull(), + Cloudaccount: types.StringValue(*cloudaccount), + K8sversion: types.StringValue(cl.K8sVersion), + ClusterStatus: types.StringValue(cl.ClusterState), + UpgardeAvailable: types.BoolValue(cl.UpgradeAvailable), + } + network := models.ClusterNetwork{ + 
ClusterCIDR: types.StringValue(cl.Network.ClusterCIDR), + ServiceCIDR: types.StringValue(cl.Network.ServcieCIDR), + ClusterDNS: types.StringValue(cl.Network.ClusterDNS), + EnableLB: types.BoolValue(cl.Network.EnableLB), + } + iksModel.Network, diags = types.ObjectValueFrom(ctx, network.AttributeTypes(), network) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + // for _, k := range cl.UpgradableK8sVersions { + // iksModel.UpgradableVersions = append(iksModel.UpgradableVersions, types.StringValue(k)) + // } + + // Map NodeGroups + ngs := []models.NodeGroup{} + for _, n := range cl.NodeGroups { + ng := models.NodeGroup{ + ID: types.StringValue(n.ID), + Name: types.StringValue(n.Name), + Count: types.Int64Value(n.Count), + InstanceType: types.StringValue(n.InstanceType), + IMIId: types.StringValue(n.IMIID), + State: types.StringValue(n.State), + UserDataURL: types.StringValue(n.UserDataURL), + } + for _, sshk := range n.SSHKeyNames { + iksModel.SSHPublicKeyNames = append(iksModel.SSHPublicKeyNames, types.StringValue(sshk.Name)) + } + ngs = append(ngs, ng) + } + + ngObj, diags := types.ListValueFrom(ctx, types.ObjectType{}.WithAttributeTypes(models.NodeGroupAttributes), ngs) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + iksModel.NodeGroups = ngObj + + // Map LoadBalancer/VIPs + vips := []models.IKSLoadBalancer{} + for _, v := range cl.VIPs { + vip := models.IKSLoadBalancer{ + Name: types.StringValue(v.Name), + VipState: types.StringValue(v.State), + VipIp: types.StringValue(v.IP), + Port: types.Int64Value(v.Port), + PoolPort: types.Int64Value(v.PoolPort), + VipType: types.StringValue(v.Type), + } + vips = append(vips, vip) + } + lbObj, diags := types.ListValueFrom(ctx, types.ObjectType{}.WithAttributeTypes(models.IKSLoadLalancerAttributes), vips) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + iksModel.LoadBalancer = lbObj + + // Map Storages + vols := []models.IKSStorage{} + for _, s := range cl.Storages { + sizeNum, _ := strconv.ParseInt(s.Size, 10, 64) + vol := models.IKSStorage{ + Size: types.Int64Value(sizeNum), + StorageProvider: types.StringValue(s.Provider), + State: types.StringValue(s.State), + } + vols = append(vols, vol) + } + storageObj, diags := types.ListValueFrom(ctx, types.ObjectType{}.WithAttributeTypes(models.IKStorageAttributes), vols) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + iksModel.Storage = storageObj + + state.Clusters = append(state.Clusters, iksModel) + } + // Set state + diags = resp.State.Set(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } +} diff --git a/internal/provider/iks_load_balancer_resource.go b/internal/provider/iks_load_balancer_resource.go new file mode 100644 index 0000000..bcdd3e9 --- /dev/null +++ b/internal/provider/iks_load_balancer_resource.go @@ -0,0 +1,182 @@ +package provider + +import ( + "context" + "fmt" + "strconv" + "terraform-provider-intelcloud/internal/models" + "terraform-provider-intelcloud/pkg/itacservices" + + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ resource.Resource = &iksLBResource{} + _ resource.ResourceWithConfigure = &iksLBResource{} +) + +// orderIKSNodeGroupModel maps the resource schema data. +type iksLBResourceModel struct { + ClusterUUID types.String `tfsdk:"cluster_uuid"` + LoadBalancers []models.IKSLoadBalancer `tfsdk:"load_balancers"` +} + +// NewIKSLB is a helper function to simplify the provider implementation. 
+func NewIKSLBResource() resource.Resource { + return &iksLBResource{} +} + +// orderIKSNodeGroup is the resource implementation. +type iksLBResource struct { + client *itacservices.IDCServicesClient +} + +// Configure adds the provider configured client to the resource. +func (r *iksLBResource) Configure(_ context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + + client, ok := req.ProviderData.(*itacservices.IDCServicesClient) + + if !ok { + resp.Diagnostics.AddError( + "Unexpected Data Source Configure Type", + fmt.Sprintf("Expected *idcservices.IDCServicesClient, got: %T. Please report this issue to the provider developers.", req.ProviderData), + ) + + return + } + + r.client = client +} + +// Metadata returns the resource type name. +func (r *iksLBResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_iks_lb" +} + +// Schema defines the schema for the resource. +func (r *iksLBResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "cluster_uuid": schema.StringAttribute{ + Required: true, + }, + "load_balancers": schema.ListNestedAttribute{ + Required: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Required: true, + }, + "vip_state": schema.StringAttribute{ + Computed: true, + }, + "vip_ip": schema.StringAttribute{ + Computed: true, + }, + "port": schema.Int64Attribute{ + Required: true, + }, + "pool_port": schema.Int64Attribute{ + Computed: true, + }, + "vip_type": schema.StringAttribute{ + Required: true, + }, + }, + }, + }, + }, + } +} + +// Create creates the resource and sets the initial Terraform state. 
+func (r *iksLBResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + // Retrieve values from plan + var plan iksLBResourceModel + + diags := req.Plan.Get(ctx, &plan) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + for idx := range plan.LoadBalancers { + inArg := itacservices.IKSLoadBalancerRequest{ + Name: plan.LoadBalancers[idx].Name.ValueString(), + Port: int(plan.LoadBalancers[idx].Port.ValueInt64()), + VIPType: plan.LoadBalancers[idx].VipType.ValueString(), + } + + ilbResp, _, err := r.client.CreateIKSLoadBalancer(ctx, &inArg, plan.ClusterUUID.ValueString()) + if err != nil { + resp.Diagnostics.AddError( + "Error creating iks load balancer", + "Could not create iks load balancer, unexpected error: "+err.Error(), + ) + return + } + + // Format the numeric VIP ID in base 10: FormatInt panics on any base + // outside 2..36, and Read parses this ID back with ParseInt(..., 10, 64). + plan.LoadBalancers[idx].ID = types.StringValue(strconv.FormatInt(ilbResp.ID, 10)) + plan.LoadBalancers[idx].PoolPort = types.Int64Value(int64(ilbResp.PoolPort)) + plan.LoadBalancers[idx].VipState = types.StringValue(ilbResp.VIPState) + plan.LoadBalancers[idx].VipIp = types.StringValue(ilbResp.VIPIP) + } + + // Set state to fully populated data + diags = resp.State.Set(ctx, plan) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } +} + +// Read refreshes the Terraform state with the latest data. +func (r *iksLBResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + // Get current state + var state iksLBResourceModel + diags := req.State.Get(ctx, &state) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + for idx, lb := range state.LoadBalancers { + vipIdNum, _ := strconv.ParseInt(lb.ID.ValueString(), 10, 64) + refreshedState, err := r.client.GetIKSLoadBalancerByID(ctx, state.ClusterUUID.ValueString(), vipIdNum) + if err != nil { + resp.Diagnostics.AddError( + "Error Reading IDC Compute IKS Load Balancer resource", + "Could not read IDC Compute IKS Load Balancer resource ID "+state.ClusterUUID.ValueString()+": "+err.Error(), + ) + return + } + state.LoadBalancers[idx].PoolPort = types.Int64Value(int64(refreshedState.PoolPort)) + state.LoadBalancers[idx].VipIp = types.StringValue(refreshedState.VIPIP) + state.LoadBalancers[idx].VipState = types.StringValue(refreshedState.VIPState) + } + + // Set state to fully populated data + diags = resp.State.Set(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + +} + +// Update updates the resource and sets the updated Terraform state on success. +func (r *iksLBResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { +} + +// Delete deletes the resource and removes the Terraform state on success. +func (r *iksLBResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { +} diff --git a/internal/provider/iks_node_group_resource.go b/internal/provider/iks_node_group_resource.go new file mode 100644 index 0000000..930d618 --- /dev/null +++ b/internal/provider/iks_node_group_resource.go @@ -0,0 +1,228 @@ +package provider + +import ( + "context" + "fmt" + "terraform-provider-intelcloud/internal/models" + "terraform-provider-intelcloud/pkg/itacservices" + + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +// Ensure the implementation satisfies the expected interfaces. 
+var ( + _ resource.Resource = &iksNodeGroupResource{} + _ resource.ResourceWithConfigure = &iksNodeGroupResource{} +) + +// iksNodeGroupResourceModel maps the resource schema data. +type iksNodeGroupResourceModel struct { + ClusterUUID types.String `tfsdk:"cluster_uuid"` + ID types.String `tfsdk:"id"` + Count types.Int64 `tfsdk:"node_count"` + Name types.String `tfsdk:"name"` + NodeType types.String `tfsdk:"node_type"` + IMIId types.String `tfsdk:"imiid"` + State types.String `tfsdk:"state"` + UserDataURL types.String `tfsdk:"userdata_url"` + SSHPublicKeyNames []types.String `tfsdk:"ssh_public_key_names"` + Interfaces []models.NetworkInterfaceSpec `tfsdk:"interfaces"` +} + +// NewOrderKubernetes is a helper function to simplify the provider implementation. +func NewIKSNodeGroupResource() resource.Resource { + return &iksNodeGroupResource{} +} + +// orderIKSNodeGroup is the resource implementation. +type iksNodeGroupResource struct { + client *itacservices.IDCServicesClient +} + +// Configure adds the provider configured client to the resource. +func (r *iksNodeGroupResource) Configure(_ context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + + client, ok := req.ProviderData.(*itacservices.IDCServicesClient) + + if !ok { + resp.Diagnostics.AddError( + "Unexpected Data Source Configure Type", + fmt.Sprintf("Expected *itacservices.IDCServicesClient, got: %T. Please report this issue to the provider developers.", req.ProviderData), + ) + + return + } + + r.client = client +} + +// Metadata returns the resource type name. +func (r *iksNodeGroupResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_iks_node_group" +} + +// Schema defines the schema for the resource. 
+func (r *iksNodeGroupResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Computed: true, + }, + "cluster_uuid": schema.StringAttribute{ + Required: true, + }, + "node_count": schema.Int64Attribute{ + Required: true, + }, + "node_type": schema.StringAttribute{ + Required: true, + }, + "name": schema.StringAttribute{ + Required: true, + }, + "imiid": schema.StringAttribute{ + Computed: true, + }, + "state": schema.StringAttribute{ + Computed: true, + }, + "userdata_url": schema.StringAttribute{ + Optional: true, + }, + "ssh_public_key_names": schema.ListAttribute{ + ElementType: types.StringType, + Required: true, + }, + "interfaces": schema.ListNestedAttribute{ + Required: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "name": schema.StringAttribute{ + Required: true, + }, + "vnet": schema.StringAttribute{ + Required: true, + }, + }, + }, + }, + }, + } +} + +// Create creates the resource and sets the initial Terraform state. +func (r *iksNodeGroupResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + // Retrieve values from plan + var plan iksNodeGroupResourceModel + + diags := req.Plan.Get(ctx, &plan) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + inArg := itacservices.IKSNodeGroupCreateRequest{ + Name: plan.Name.ValueString(), + Count: plan.Count.ValueInt64(), + ProductType: "iks-cluster", + InstanceTypeId: plan.NodeType.ValueString(), + UserDataURL: plan.UserDataURL.ValueString(), + } + + for _, k := range plan.SSHPublicKeyNames { + inArg.SSHKeyNames = append(inArg.SSHKeyNames, itacservices.SKey{Name: k.ValueString()}) + } + + for _, inf := range plan.Interfaces { + inArg.Interfaces = append(inArg.Interfaces, + struct { + AvailabilityZone string "json:\"availabilityzonename\"" + VNet string "json:\"networkinterfacevnetname\"" + }{ + AvailabilityZone: inf.Name.ValueString(), + VNet: inf.VNet.ValueString(), + }) + } + + nodeGroupResp, _, err := r.client.CreateIKSNodeGroup(ctx, &inArg, plan.ClusterUUID.ValueString(), false) + if err != nil { + resp.Diagnostics.AddError( + "Error creating iks node group", + "Could not create iks node group, unexpected error: "+err.Error(), + ) + return + } + + plan.ID = types.StringValue(nodeGroupResp.ID) + plan.IMIId = types.StringValue(nodeGroupResp.IMIID) + plan.State = types.StringValue(nodeGroupResp.State) + + // Set state to fully populated data + diags = resp.State.Set(ctx, plan) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } +} + +// Read refreshes the Terraform state with the latest data. +func (r *iksNodeGroupResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + // Get current state + var state iksNodeGroupResourceModel + diags := req.State.Get(ctx, &state) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + // Get refreshed order value from IDC Service + ngState, _, err := r.client.GetIKSNodeGroupByID(ctx, state.ClusterUUID.ValueString(), state.ID.ValueString()) + if err != nil { + resp.Diagnostics.AddError( + "Error Reading IDC Compute IKS Node Group resource", + "Could not read IDC Compute IKS Node Group resource ID "+state.ID.ValueString()+": "+err.Error(), + ) + return + } + + state.IMIId = types.StringValue(ngState.IMIID) + state.State = types.StringValue(ngState.State) + + // Set state to fully populated data + diags = resp.State.Set(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + +} + +// Update updates the resource and sets the updated Terraform state on success. +func (r *iksNodeGroupResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { +} + +// Delete deletes the resource and removes the Terraform state on success. +func (r *iksNodeGroupResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + // Get current state + var state iksNodeGroupResourceModel + diags := req.State.Get(ctx, &state) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + // Delete the order from IDC Services + err := r.client.DeleteIKSNodeGroup(ctx, state.ClusterUUID.ValueString(), state.ID.ValueString()) + if err != nil { + resp.Diagnostics.AddError( + "Error Deleting IDC IKS node group resource", + "Could not delete IDC IKS node group resource ID "+state.ID.ValueString()+": "+err.Error(), + ) + return + } +} diff --git a/internal/provider/instance_data_source.go b/internal/provider/instance_data_source.go new file mode 100644 index 0000000..3545980 --- /dev/null +++ b/internal/provider/instance_data_source.go @@ -0,0 +1,238 @@ +package provider + +import ( + "context" + "fmt" + + "terraform-provider-intelcloud/internal/models" + "terraform-provider-intelcloud/pkg/itacservices" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func NewInstanceDataSource() datasource.DataSource { + return &instanceDataSource{} +} + +type instanceDataSource struct { + client *itacservices.IDCServicesClient +} + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &instanceDataSource{} + _ datasource.DataSourceWithConfigure = &instanceDataSource{} +) + +// storagesDataSourceModel maps the data source schema data. +type instanceDataSourceModel struct { + Instances []models.InstanceModel `tfsdk:"instances"` +} + +// Configure adds the provider configured client to the data source. 
+func (d *instanceDataSource) Configure(_ context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + + client, ok := req.ProviderData.(*itacservices.IDCServicesClient) + if !ok { + resp.Diagnostics.AddError( + "Unexpected Data Source Configure Type", + fmt.Sprintf("Expected *itacservices.IDCServicesClient, got: %T. Please report this issue to the provider developers.", req.ProviderData), + ) + + return + } + + d.client = client +} + +func (d *instanceDataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_instance" +} + +func (d *instanceDataSource) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "instances": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "name": schema.StringAttribute{ + Required: true, + }, + "resource_id": schema.StringAttribute{ + Computed: true, + }, + "cloudaccount": schema.StringAttribute{ + Computed: true, + }, + "spec": schema.SingleNestedAttribute{ + Required: true, + Attributes: map[string]schema.Attribute{ + "availability_zone": schema.StringAttribute{ + Optional: true, + }, + "instance_group": schema.StringAttribute{ + Optional: true, + }, + "instance_type": schema.StringAttribute{ + Required: true, + }, + "machine_image": schema.StringAttribute{ + Required: true, + }, + "ssh_public_key_names": schema.ListAttribute{ + ElementType: types.StringType, + Required: true, + }, + "user_data": schema.StringAttribute{ + Optional: true, + }, + }, + }, + "interfaces": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "address": schema.StringAttribute{ + Computed: true, + }, + "dns_name": 
schema.StringAttribute{ + Computed: true, + }, + "gateway": schema.StringAttribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + "prefix_length": schema.Int64Attribute{ + Computed: true, + }, + "subnet": schema.StringAttribute{ + Computed: true, + }, + "vnet": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + "access_info": schema.ObjectAttribute{ + AttributeTypes: map[string]attr.Type{ + "username": types.StringType, + }, + Computed: true, + }, + "ssh_proxy": schema.ObjectAttribute{ + AttributeTypes: map[string]attr.Type{ + "address": types.StringType, + "port": types.Int64Type, + "user": types.StringType, + }, + Computed: true, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + }, + } +} + +func (d *instanceDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + + var state instanceDataSourceModel + state.Instances = []models.InstanceModel{} + + instanceList, err := d.client.GetInstances(ctx) + if err != nil { + resp.Diagnostics.AddError( + "Unable to Read IDC Instances", + err.Error(), + ) + return + } + for _, inst := range instanceList.Instances { + instModel := models.InstanceModel{ + Cloudaccount: types.StringValue(inst.Metadata.Cloudaccount), + Name: types.StringValue(inst.Metadata.Name), + ResourceId: types.StringValue(inst.Metadata.ResourceId), + Spec: models.InstanceSpec{ + InstanceGroup: types.StringValue(inst.Spec.InstanceGroup), + InstanceType: types.StringValue(inst.Spec.InstanceType), + MachineImage: types.StringValue(inst.Spec.MachineImage), + UserData: types.StringValue(inst.Spec.UserData), + }, + Status: types.StringValue(inst.Status.Phase), + } + + for _, k := range inst.Spec.SshPublicKeyNames { + instModel.Spec.SSHPublicKeyNames = append(instModel.Spec.SSHPublicKeyNames, types.StringValue(k)) + } + + infs := []models.NetworkInterface{} + for _, nic := range inst.Status.Interfaces { + // currently we ssume a single 
interface will have a single address + addr := "" + if len(nic.Addresses) > 0 { + addr = nic.Addresses[0] + } + inf := models.NetworkInterface{ + Addresses: types.StringValue(addr), + DNSName: types.StringValue(nic.DNSName), + Gateway: types.StringValue(nic.Gateway), + Name: types.StringValue(nic.Name), + PrefixLength: types.Int64Value(int64(nic.PrefixLength)), + Subnet: types.StringValue(nic.Subnet), + VNet: types.StringValue(nic.VNet), + } + infs = append(infs, inf) + } + infObject, diags := types.ListValueFrom(ctx, types.ObjectType{}.WithAttributeTypes(models.ProviderInterfaceAttributes), infs) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + instModel.Interfaces = infObject + + accessInfoMap := models.InstanceAccessInfoModel{ + Username: types.StringValue(inst.Status.UserName), + } + + accessObj, diags := types.ObjectValueFrom(ctx, accessInfoMap.AttributeTypes(), accessInfoMap) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + instModel.AccessInfo = accessObj + + sshProxyMap := models.SSHProxyModel{ + ProxyAddress: types.StringValue(inst.Status.SSHProxy.Address), + ProxyPort: types.Int64Value(inst.Status.SSHProxy.Port), + ProxyUser: types.StringValue(inst.Status.SSHProxy.User), + } + sshProxyObj, diags := types.ObjectValueFrom(ctx, sshProxyMap.AttributeTypes(), sshProxyMap) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + instModel.SSHProxy = sshProxyObj + + state.Instances = append(state.Instances, instModel) + } + + // Set state + diags := resp.State.Set(ctx, &state) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } +} diff --git a/internal/provider/instance_resource.go b/internal/provider/instance_resource.go new file mode 100644 index 0000000..b070832 --- /dev/null +++ b/internal/provider/instance_resource.go @@ -0,0 +1,442 @@ +package provider + +import ( + "context" + "fmt" + + "terraform-provider-intelcloud/internal/models" + "terraform-provider-intelcloud/pkg/itacservices" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/int64planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/listplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ resource.Resource = &computeInstanceResource{} + _ resource.ResourceWithConfigure = &computeInstanceResource{} +) + +// orderFilesystemModel maps the resource schema data. +type computeInstanceResourceModel struct { + ID types.String `tfsdk:"id"` + Cloudaccount types.String `tfsdk:"cloudaccount"` + Name types.String `tfsdk:"name"` + AvailabilityZone types.String `tfsdk:"availability_zone"` + Spec *models.InstanceSpec `tfsdk:"spec"` + Status types.String `tfsdk:"status"` + Interfaces types.List `tfsdk:"interfaces"` + SSHProxy types.Object `tfsdk:"ssh_proxy"` + AccessInfo types.Object `tfsdk:"access_info"` +} + +// NewOrderFilesystem is a helper function to simplify the provider implementation. 
+func NewComputeInstanceResource() resource.Resource { + return &computeInstanceResource{} +} + +// computeInstanceResource is the resource implementation. +type computeInstanceResource struct { + client *itacservices.IDCServicesClient +} + +// Configure adds the provider configured client to the resource. +func (r *computeInstanceResource) Configure(_ context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + + client, ok := req.ProviderData.(*itacservices.IDCServicesClient) + + if !ok { + resp.Diagnostics.AddError( + "Unexpected Data Source Configure Type", + fmt.Sprintf("Expected *itacservices.IDCServicesClient, got: %T. Please report this issue to the provider developers.", req.ProviderData), + ) + + return + } + + r.client = client +} + +// Metadata returns the resource type name. +func (r *computeInstanceResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_instance" +} + +// Schema defines the schema for the resource. 
+func (r *computeInstanceResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Required: true, + }, + "cloudaccount": schema.StringAttribute{ + Computed: true, + }, + "availability_zone": schema.StringAttribute{ + Computed: true, + }, + "spec": schema.SingleNestedAttribute{ + Required: true, + Attributes: map[string]schema.Attribute{ + "instance_group": schema.StringAttribute{ + Optional: true, + }, + "instance_type": schema.StringAttribute{ + Required: true, + }, + "machine_image": schema.StringAttribute{ + Required: true, + }, + "ssh_public_key_names": schema.ListAttribute{ + ElementType: types.StringType, + Required: true, + }, + "user_data": schema.StringAttribute{ + Optional: true, + }, + }, + }, + "interfaces": schema.ListNestedAttribute{ + Optional: true, + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "address": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "dns_name": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "gateway": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "name": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "prefix_length": schema.Int64Attribute{ + Computed: true, + PlanModifiers: []planmodifier.Int64{ + int64planmodifier.UseStateForUnknown(), + }, + }, + "subnet": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "vnet": 
schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + }, + }, + PlanModifiers: []planmodifier.List{ + listplanmodifier.UseStateForUnknown(), + }, + }, + "access_info": schema.ObjectAttribute{ + AttributeTypes: map[string]attr.Type{ + "username": types.StringType, + }, + Computed: true, + }, + "ssh_proxy": schema.ObjectAttribute{ + AttributeTypes: map[string]attr.Type{ + "address": types.StringType, + "port": types.Int64Type, + "user": types.StringType, + }, + Computed: true, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + }, + } +} + +// Create creates the resource and sets the initial Terraform state. +func (r *computeInstanceResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + // Retrieve values from plan + var plan computeInstanceResourceModel + + diags := req.Plan.Get(ctx, &plan) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + tflog.Info(ctx, "making a call to IDC Service to createVnetIfNotExist") + vnetResp, err := r.client.CreateVNetIfNotFound(ctx) + if err != nil || vnetResp == nil { + resp.Diagnostics.AddError( + "Error creating order", + "Could not create order, unexpected error: "+err.Error(), + ) + return + } + + sshKeys := []string{} + for _, k := range plan.Spec.SSHPublicKeyNames { + sshKeys = append(sshKeys, k.ValueString()) + } + + inArg := itacservices.InstanceCreateRequest{ + Metadata: struct { + Name string "json:\"name\"" + }{ + Name: plan.Name.ValueString(), + }, + Spec: struct { + AvailabilityZone string "json:\"availabilityZone\"" + InstanceGroup string "json:\"instanceGroup,omitempty\"" + InstanceType string "json:\"instanceType\"" + Interfaces []struct { + Name string "json:\"name\"" + VNet string "json:\"vNet\"" + } "json:\"interfaces\"" + MachineImage string "json:\"machineImage\"" + SshPublicKeyNames []string "json:\"sshPublicKeyNames\"" + UserData 
string "json:\"userData,omitempty\"" + }{ + AvailabilityZone: fmt.Sprintf("%sa", *r.client.Region), + InstanceGroup: plan.Spec.InstanceGroup.ValueString(), + Interfaces: []struct { + Name string "json:\"name\"" + VNet string "json:\"vNet\"" + }{ + { + Name: "eth0", + VNet: fmt.Sprintf("%sa-default", *r.client.Region), + }, + }, + InstanceType: plan.Spec.InstanceType.ValueString(), + MachineImage: plan.Spec.MachineImage.ValueString(), + UserData: plan.Spec.UserData.ValueString(), + SshPublicKeyNames: sshKeys, + }, + } + + tflog.Info(ctx, "making a call to IDC Service for create instance") + instResp, err := r.client.CreateInstance(ctx, &inArg, false) + if err != nil { + resp.Diagnostics.AddError( + "Error creating order", + "Could not create order, unexpected error: "+err.Error(), + ) + return + } + + // Map response body to schema and populate Computed attribute values + plan.ID = types.StringValue(instResp.Metadata.ResourceId) + plan.Cloudaccount = types.StringValue(instResp.Metadata.Cloudaccount) + plan.Status = types.StringValue(instResp.Status.Phase) + plan.AvailabilityZone = types.StringValue(instResp.Spec.AvailabilityZone) + + accessInfoMap := models.InstanceAccessInfoModel{ + Username: types.StringValue(instResp.Status.UserName), + } + + plan.AccessInfo, diags = types.ObjectValueFrom(ctx, accessInfoMap.AttributeTypes(), accessInfoMap) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + sshProxyMap := models.SSHProxyModel{ + ProxyAddress: types.StringValue(instResp.Status.SSHProxy.Address), + ProxyPort: types.Int64Value(instResp.Status.SSHProxy.Port), + ProxyUser: types.StringValue(instResp.Status.SSHProxy.User), + } + plan.SSHProxy, diags = types.ObjectValueFrom(ctx, sshProxyMap.AttributeTypes(), sshProxyMap) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + infs := []models.NetworkInterface{} + for _, nic := range instResp.Status.Interfaces { + // currently we ssume a single interface will have a single address + addr := "" + if len(nic.Addresses) > 0 { + addr = nic.Addresses[0] + } + inf := models.NetworkInterface{ + Addresses: types.StringValue(addr), + DNSName: types.StringValue(nic.DNSName), + Gateway: types.StringValue(nic.Gateway), + Name: types.StringValue(nic.Name), + PrefixLength: types.Int64Value(int64(nic.PrefixLength)), + Subnet: types.StringValue(nic.Subnet), + VNet: types.StringValue(nic.VNet), + } + infs = append(infs, inf) + } + plan.Interfaces, diags = types.ListValueFrom(ctx, types.ObjectType{}.WithAttributeTypes(models.ProviderInterfaceAttributes), infs) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + // Set state to fully populated data + diags = resp.State.Set(ctx, plan) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + +} + +// Read refreshes the Terraform state with the latest data. +func (r *computeInstanceResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + // Get current state + var state computeInstanceResourceModel + diags := req.State.Get(ctx, &state) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + // Get refreshed order value from IDC Service + instance, err := r.client.GetInstanceByResourceId(ctx, state.ID.ValueString()) + if err != nil { + resp.Diagnostics.AddError( + "Error Reading IDC Compute Instance resource", + "Could not read IDC Compute Instance resource ID "+state.ID.ValueString()+": "+err.Error(), + ) + return + } + + tflog.Debug(ctx, "instance read request response", map[string]any{"instance": instance, "resourceId": state.ID.ValueString()}) + + // state = orderInstanceModel{} + // state.Instance = models.InstanceModel{} + + state.Cloudaccount = types.StringValue(instance.Metadata.Cloudaccount) + state.ID = types.StringValue(instance.Metadata.ResourceId) + state.Name = types.StringValue(instance.Metadata.Name) + state.AvailabilityZone = types.StringValue(instance.Spec.AvailabilityZone) + state.Spec = &models.InstanceSpec{ + InstanceGroup: types.StringValue(instance.Spec.InstanceGroup), + InstanceType: types.StringValue(instance.Spec.InstanceType), + MachineImage: types.StringValue(instance.Spec.MachineImage), + UserData: types.StringValue(instance.Spec.UserData), + } + + for _, k := range instance.Spec.SshPublicKeyNames { + state.Spec.SSHPublicKeyNames = append(state.Spec.SSHPublicKeyNames, types.StringValue(k)) + } + + state.Status = types.StringValue(instance.Status.Phase) + + accessInfoMap := models.InstanceAccessInfoModel{ + Username: types.StringValue(instance.Status.UserName), + } + + state.AccessInfo, diags = types.ObjectValueFrom(ctx, accessInfoMap.AttributeTypes(), accessInfoMap) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + sshProxyMap := models.SSHProxyModel{ + ProxyAddress: types.StringValue(instance.Status.SSHProxy.Address), + ProxyPort: types.Int64Value(instance.Status.SSHProxy.Port), + ProxyUser: types.StringValue(instance.Status.SSHProxy.User), + } + state.SSHProxy, diags = types.ObjectValueFrom(ctx, sshProxyMap.AttributeTypes(), sshProxyMap) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + infs := []models.NetworkInterface{} + for _, nic := range instance.Status.Interfaces { + inf := models.NetworkInterface{ + Addresses: types.StringValue(nic.Addresses[0]), + DNSName: types.StringValue(nic.DNSName), + Gateway: types.StringValue(nic.Gateway), + Name: types.StringValue(nic.Name), + PrefixLength: types.Int64Value(int64(nic.PrefixLength)), + Subnet: types.StringValue(nic.Subnet), + VNet: types.StringValue(nic.VNet), + } + // for _, addr := range nic.Addresses { + // inf.Addresses = append(inf.Addresses, types.StringValue(addr)) + // } + infs = append(infs, inf) + } + state.Interfaces, diags = types.ListValueFrom(ctx, types.ObjectType{}.WithAttributeTypes(models.ProviderInterfaceAttributes), infs) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + tflog.Debug(ctx, "instance read request state ready", map[string]any{"status": state.Status.ValueString(), "resourceId": state.ID.ValueString()}) + + // Set refreshed state + diags = resp.State.Set(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } +} + +// Update updates the resource and sets the updated Terraform state on success. 
+func (r *computeInstanceResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { +} + +func (r *computeInstanceResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + // Retrieve import ID and save to id attribute + resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp) +} + +// Delete deletes the resource and removes the Terraform state on success. +func (r *computeInstanceResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + // Get current state + var state computeInstanceResourceModel + diags := req.State.Get(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + // Delete the order from IDC Services + err := r.client.DeleteInstanceByResourceId(ctx, state.ID.ValueString()) + if err != nil { + resp.Diagnostics.AddError( + "Error Deleting IDC Filesystem resource", + "Could not delete IDC Filesystem resource ID "+state.ID.ValueString()+": "+err.Error(), + ) + return + } +} diff --git a/internal/provider/instance_types_data_source.go b/internal/provider/instance_types_data_source.go new file mode 100644 index 0000000..c98310d --- /dev/null +++ b/internal/provider/instance_types_data_source.go @@ -0,0 +1,107 @@ +package provider + +import ( + "context" + "fmt" + + "terraform-provider-intelcloud/internal/models" + "terraform-provider-intelcloud/pkg/itacservices" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func NewInstanceTypesDataSource() datasource.DataSource { + return &instanceTypesDataSource{} +} + +type instanceTypesDataSource struct { + client *itacservices.IDCServicesClient +} + +// Ensure the implementation satisfies the expected interfaces. 
+var ( + _ datasource.DataSource = &instanceTypesDataSource{} + _ datasource.DataSourceWithConfigure = &instanceTypesDataSource{} +) + +// storagesDataSourceModel maps the data source schema data. +type instanceTypesDataSourceModel struct { + InstanceTypes []models.InstanceType `tfsdk:"instance_types"` +} + +// Configure adds the provider configured client to the data source. +func (d *instanceTypesDataSource) Configure(_ context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + + client, ok := req.ProviderData.(*itacservices.IDCServicesClient) + if !ok { + resp.Diagnostics.AddError( + "Unexpected Data Source Configure Type", + fmt.Sprintf("Expected *itacservices.IDCServicesClient, got: %T. Please report this issue to the provider developers.", req.ProviderData), + ) + + return + } + + d.client = client +} + +func (d *instanceTypesDataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_instance_types" +} + +func (d *instanceTypesDataSource) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "instance_types": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "name": schema.StringAttribute{ + Computed: true, + }, + "description": schema.StringAttribute{ + Computed: true, + }, + "instance_category": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + }, + } +} + +func (d *instanceTypesDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + var state instanceTypesDataSourceModel + + instTypes, err := d.client.GetInstanceTypes(ctx) + if err != nil { + resp.Diagnostics.AddError( + "Unable to Read IDC Instance types", + err.Error(), + ) + return + } 
+ + for _, t := range instTypes.Items { + ifInst := models.InstanceType{ + Name: types.StringValue(t.Metadata.Name), + Description: types.StringValue(t.Spec.Description), + InstanceCategory: types.StringValue(t.Spec.InstanceCategory), + } + state.InstanceTypes = append(state.InstanceTypes, ifInst) + } + + // Set state + diags := resp.State.Set(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } +} diff --git a/internal/provider/machine_images_data_source.go b/internal/provider/machine_images_data_source.go new file mode 100644 index 0000000..6768ca0 --- /dev/null +++ b/internal/provider/machine_images_data_source.go @@ -0,0 +1,219 @@ +package provider + +import ( + "context" + "fmt" + "strings" + + "terraform-provider-intelcloud/internal/models" + "terraform-provider-intelcloud/pkg/itacservices" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func NewMachineImagesDataSource() datasource.DataSource { + return &machineImagesDataSource{} +} + +type machineImagesDataSource struct { + client *itacservices.IDCServicesClient +} + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &machineImagesDataSource{} + _ datasource.DataSourceWithConfigure = &machineImagesDataSource{} +) + +// storagesDataSourceModel maps the data source schema data. +type machineImagesDataSourceModel struct { + MostRecent bool `tfsdk:"most_recent"` + Filters []KVFilter `tfsdk:"filters"` + Result *models.MachineImage `tfsdk:"result"` + Images []models.MachineImage `tfsdk:"items"` +} + +type KVFilter struct { + Key string `tfsdk:"name"` + Values []string `tfsdk:"values"` +} + +// Configure adds the provider configured client to the data source. 
+func (d *machineImagesDataSource) Configure(_ context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + + client, ok := req.ProviderData.(*itacservices.IDCServicesClient) + if !ok { + resp.Diagnostics.AddError( + "Unexpected Data Source Configure Type", + fmt.Sprintf("Expected *itacservices.IDCServicesClient, got: %T. Please report this issue to the provider developers.", req.ProviderData), + ) + + return + } + + d.client = client +} + +func (d *machineImagesDataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_machine_images" +} + +func (d *machineImagesDataSource) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "most_recent": schema.BoolAttribute{ + Required: true, + }, + "filters": schema.ListNestedAttribute{ + Required: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "name": schema.StringAttribute{ + Required: true, + }, + "values": schema.ListAttribute{ + ElementType: types.StringType, + Required: true, + }, + }, + }, + }, + "result": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "name": schema.StringAttribute{ + Computed: true, + }, + "description": schema.StringAttribute{ + Computed: true, + }, + "instance_category": schema.ListAttribute{ + ElementType: types.StringType, + Computed: true, + }, + "instance_types": schema.ListAttribute{ + ElementType: types.StringType, + Computed: true, + }, + }, + }, + "items": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "name": schema.StringAttribute{ + Computed: true, + }, + "description": schema.StringAttribute{ + Computed: true, + }, + 
"instance_category": schema.ListAttribute{ + ElementType: types.StringType, + Computed: true, + }, + "instance_types": schema.ListAttribute{ + ElementType: types.StringType, + Computed: true, + }, + }, + }, + }, + }, + } +} + +func (d *machineImagesDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + var state machineImagesDataSourceModel + + diags := req.Config.Get(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + osImgs, err := d.client.GetMachineImages(ctx) + if err != nil { + resp.Diagnostics.AddError( + "Unable to Read ITAC Machine Images", + err.Error(), + ) + return + } + + allImages := []models.MachineImage{} + + for _, img := range osImgs.Items { + if img.Hidden { + continue + } + tfImg := models.MachineImage{ + Name: types.StringValue(img.Metadata.Name), + Description: types.StringValue(img.Spec.Description), + } + for _, i := range img.Spec.InstanceCategories { + tfImg.InstanceCategory = append(tfImg.InstanceCategory, types.StringValue(i)) + } + for _, t := range img.Spec.InstanceTypes { + tfImg.InstanceTypes = append(tfImg.InstanceCategory, types.StringValue(t)) + } + allImages = append(allImages, tfImg) + } + filteredImages := filterImages(allImages, state.Filters) + + state.Images = append(state.Images, filteredImages...) + state.Result = &filteredImages[0] + + // Set state + diags = resp.State.Set(ctx, &state) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } +} + +func filterImages(allImages []models.MachineImage, filters []KVFilter) []models.MachineImage { + filteredImages := allImages + + for _, filter := range filters { + switch filter.Key { + case "name": + filteredImages = filterByName(filteredImages, filter.Values) + case "machine-type": + filteredImages = filterByMachineType(filteredImages, filter.Values) + default: + return allImages + } + } + return filteredImages +} + +func filterByName(allImages []models.MachineImage, values []string) []models.MachineImage { + filteredImages := []models.MachineImage{} + for _, v := range values { + for _, img := range allImages { + if strings.Contains(img.Name.ValueString(), v) { + filteredImages = append(filteredImages, img) + } + } + } + return filteredImages +} + +func filterByMachineType(allImages []models.MachineImage, values []string) []models.MachineImage { + filteredImages := []models.MachineImage{} + for _, v := range values { + for _, img := range allImages { + for _, inst := range img.InstanceTypes { + if strings.Contains(inst.ValueString(), v) { + filteredImages = append(filteredImages, img) + } + } + } + } + return filteredImages +} diff --git a/internal/provider/objectstore_resource.go b/internal/provider/objectstore_resource.go new file mode 100644 index 0000000..5171974 --- /dev/null +++ b/internal/provider/objectstore_resource.go @@ -0,0 +1,279 @@ +package provider + +import ( + "context" + "fmt" + + "terraform-provider-intelcloud/internal/models" + "terraform-provider-intelcloud/pkg/itacservices" + + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" +) + +// Ensure the implementation satisfies the expected interfaces. 
+var ( + _ resource.Resource = &objectStorageResource{} + _ resource.ResourceWithConfigure = &objectStorageResource{} + _ resource.ResourceWithImportState = &objectStorageResource{} +) + +// objectstorageResourceModel maps the resource schema data. +type objectStorageResourceModel struct { + ID types.String `tfsdk:"id"` + Cloudaccount types.String `tfsdk:"cloudaccount"` + Name types.String `tfsdk:"name"` + Versioned types.Bool `tfsdk:"versioned"` + Size types.String `tfsdk:"size"` + Status types.String `tfsdk:"status"` + PrivateEndpoint types.String `tfsdk:"private_endpoint"` + SecurityGroups types.List `tfsdk:"security_groups"` +} + +// NewObjectStorageResource is a helper function to simplify the provider implementation. +func NewObjectStorageResource() resource.Resource { + return &objectStorageResource{} +} + +// orderResource is the resource implementation. +type objectStorageResource struct { + client *itacservices.IDCServicesClient +} + +// Configure adds the provider configured client to the resource. +func (r *objectStorageResource) Configure(_ context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + + client, ok := req.ProviderData.(*itacservices.IDCServicesClient) + + if !ok { + resp.Diagnostics.AddError( + "Unexpected Data Source Configure Type", + fmt.Sprintf("Expected *itacservices.IDCServicesClient, got: %T. Please report this issue to the provider developers.", req.ProviderData), + ) + + return + } + + r.client = client +} + +// Metadata returns the resource type name. +func (r *objectStorageResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_object_storage_bucket" +} + +// Schema defines the schema for the resource. 
+func (r *objectStorageResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Required: true, + }, + "cloudaccount": schema.StringAttribute{ + Computed: true, + }, + "versioned": schema.BoolAttribute{ + Required: true, + }, + "size": schema.StringAttribute{ + Computed: true, + }, + "private_endpoint": schema.StringAttribute{ + Computed: true, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "security_groups": schema.ListNestedAttribute{ + Optional: true, + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "gateway": schema.StringAttribute{ + Computed: true, + }, + "prefix_length": schema.Int64Attribute{ + Computed: true, + }, + "subnet": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + }, + } + +} + +// Create creates the resource and sets the initial Terraform state. +func (r *objectStorageResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + // Retrieve values from plan + var plan objectStorageResourceModel + + diags := req.Plan.Get(ctx, &plan) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + inArg := itacservices.ObjectBucketCreateRequest{ + Metadata: struct { + Name string "json:\"name\"" + }{ + Name: plan.Name.ValueString(), + }, + Spec: struct { + Versioned bool "json:\"versioned\"" + InstanceType string "json:\"instanceType\"" + }{ + Versioned: plan.Versioned.ValueBool(), + InstanceType: "storage-object", + }, + } + tflog.Info(ctx, "making a call to IDC Service for create bucket") + bucket, err := r.client.CreateObjectStorageBucket(ctx, &inArg) + if err != nil { + resp.Diagnostics.AddError( + "Error creating order", + "Could not create order, unexpected error: "+err.Error(), + ) + return + } + + plan.Cloudaccount = types.StringValue(bucket.Metadata.Cloudaccount) + plan.ID = types.StringValue(bucket.Metadata.ResourceId) + plan.Status = types.StringValue(mapObjectBucketStatus(bucket.Status.Phase)) + plan.PrivateEndpoint = types.StringValue(bucket.Status.Cluster.AccessEndpoint) + plan.Size = types.StringValue(bucket.Spec.Request.Size) + + secGroups := []models.NetworkSecurityGroup{} + for _, sg := range bucket.Status.SecurityGroups.NetworkFilterAllow { + newSg := models.NetworkSecurityGroup{ + Gateway: types.StringValue(sg.Gateway), + PrefixLength: types.Int64Value(int64(sg.PrefixLength)), + Subnet: types.StringValue(sg.Subnet), + } + secGroups = append(secGroups, newSg) + } + plan.SecurityGroups, diags = types.ListValueFrom(ctx, types.ObjectType{}.WithAttributeTypes(models.NetworkSecurityGroupAttributes), secGroups) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + // Set state to fully populated data + diags = resp.State.Set(ctx, plan) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } +} + +// Read refreshes the Terraform state with the latest data. 
+func (r *objectStorageResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + // Get current state + var state objectStorageResourceModel + diags := req.State.Get(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + // Get refreshed order value from IDC Service + bucket, err := r.client.GetObjectBucketByResourceId(ctx, state.ID.ValueString()) + if err != nil { + resp.Diagnostics.AddError( + "Error Reading IDC Object Bucket resource", + "Could not read IDC Object Bucket resource ID "+state.ID.ValueString()+": "+err.Error(), + ) + return + } + + state.ID = types.StringValue(bucket.Metadata.ResourceId) + state.Cloudaccount = types.StringValue(bucket.Metadata.Cloudaccount) + state.Name = types.StringValue(bucket.Metadata.Name) + state.Size = types.StringValue(bucket.Spec.Request.Size) + state.Versioned = types.BoolValue(bucket.Spec.Versioned) + + state.Status = types.StringValue(mapObjectBucketStatus(bucket.Status.Phase)) + state.PrivateEndpoint = types.StringValue(bucket.Status.Cluster.AccessEndpoint) + + secGroups := []models.NetworkSecurityGroup{} + for _, sg := range bucket.Status.SecurityGroups.NetworkFilterAllow { + newSg := models.NetworkSecurityGroup{ + Gateway: types.StringValue(sg.Gateway), + PrefixLength: types.Int64Value(int64(sg.PrefixLength)), + Subnet: types.StringValue(sg.Subnet), + } + secGroups = append(secGroups, newSg) + } + state.SecurityGroups, diags = types.ListValueFrom(ctx, types.ObjectType{}.WithAttributeTypes(models.NetworkSecurityGroupAttributes), secGroups) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + // Set refreshed state + diags = resp.State.Set(ctx, state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } +} + +// Update updates the resource and sets the updated Terraform state on success. 
+func (r *objectStorageResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { +} + +func (r *objectStorageResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + // Retrieve import ID and save to id attribute + resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp) +} + +// Delete deletes the resource and removes the Terraform state on success. +func (r *objectStorageResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + // Get current state + var state objectStorageResourceModel + diags := req.State.Get(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + // Delete the order from IDC Services + err := r.client.DeleteBucketByResourceId(ctx, state.ID.ValueString()) + if err != nil { + resp.Diagnostics.AddError( + "Error Deleting IDC Object Storage Bucket resource", + "Could not delete IDC Object Storage Bucket resource ID "+state.ID.ValueString()+": "+err.Error(), + ) + return + } +} + +func mapObjectBucketStatus(fsStatus string) string { + switch fsStatus { + case "BucketReady": + return "ready" + case "BucketFailed": + return "failed" + case "BucketProvisioning": + return "provisioning" + case "BucketDeleting": + return "deleting" + case "BucketDeleted": + return "deleted" + default: + return "unspecified" + } +} diff --git a/internal/provider/objectstore_user_resource.go b/internal/provider/objectstore_user_resource.go new file mode 100644 index 0000000..463e258 --- /dev/null +++ b/internal/provider/objectstore_user_resource.go @@ -0,0 +1,282 @@ +package provider + +import ( + "context" + "fmt" + + "terraform-provider-intelcloud/internal/models" + "terraform-provider-intelcloud/pkg/itacservices" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/path" + 
"github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ resource.Resource = &objectStorageUserResource{} + _ resource.ResourceWithConfigure = &objectStorageUserResource{} + _ resource.ResourceWithImportState = &objectStorageUserResource{} +) + +// objectStorageUserResourceModel maps the resource schema data. +type objectStorageUserResourceModel struct { + ID types.String `tfsdk:"id"` + BucketId types.String `tfsdk:"bucket_id"` + Cloudaccount types.String `tfsdk:"cloudaccount"` + Name types.String `tfsdk:"name"` + Status types.String `tfsdk:"status"` + AllowActions []types.String `tfsdk:"allow_actions"` + AllowPolicies ObjectUserPolicy `tfsdk:"allow_policies"` + AccessInfo types.Object `tfsdk:"access_info"` +} + +type ObjectUserPolicy struct { + PathPrefix types.String `tfsdk:"path_prefix"` + Policies []types.String `tfsdk:"policies"` +} + +// NewObjectStorageResource is a helper function to simplify the provider implementation. +func NewObjectStorageUserResource() resource.Resource { + return &objectStorageUserResource{} +} + +// orderResource is the resource implementation. +type objectStorageUserResource struct { + client *itacservices.IDCServicesClient +} + +// Configure adds the provider configured client to the resource. +func (r *objectStorageUserResource) Configure(_ context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + + client, ok := req.ProviderData.(*itacservices.IDCServicesClient) + + if !ok { + resp.Diagnostics.AddError( + "Unexpected Data Source Configure Type", + fmt.Sprintf("Expected *idcservices.IDCServicesClient, got: %T. 
Please report this issue to the provider developers.", req.ProviderData), + ) + + return + } + + r.client = client +} + +// Metadata returns the resource type name. +func (r *objectStorageUserResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_object_storage_bucket_user" +} + +// Schema defines the schema for the resource. +func (r *objectStorageUserResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Required: true, + }, + "bucket_id": schema.StringAttribute{ + Required: true, + }, + "cloudaccount": schema.StringAttribute{ + Computed: true, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "allow_actions": schema.ListAttribute{ + ElementType: types.StringType, + Required: true, + }, + "allow_policies": schema.SingleNestedAttribute{ + Required: true, + Attributes: map[string]schema.Attribute{ + "path_prefix": schema.StringAttribute{ + Required: true, + }, + "policies": schema.ListAttribute{ + ElementType: types.StringType, + Required: true, + }, + }, + }, + "access_info": schema.ObjectAttribute{ + AttributeTypes: map[string]attr.Type{ + "access_key": types.StringType, + "secret_key": types.StringType, + }, + Computed: true, + }, + }, + } + +} + +// Create creates the resource and sets the initial Terraform state. +func (r *objectStorageUserResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + // Retrieve values from plan + var plan objectStorageUserResourceModel + + diags := req.Plan.Get(ctx, &plan) + resp.Diagnostics.Append(diags...) 
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	actions := []string{}
+	for _, a := range plan.AllowActions {
+		actions = append(actions, a.ValueString())
+	}
+
+	perms := []string{}
+	for _, p := range plan.AllowPolicies.Policies {
+		perms = append(perms, p.ValueString())
+	}
+
+	bucketPolicy := []itacservices.BucketPolicy{
+		{
+			BucketId:    plan.BucketId.ValueString(),
+			Actions:     actions,
+			Permissions: perms,
+			Prefix:      plan.AllowPolicies.PathPrefix.ValueString(),
+		},
+	}
+
+	inArg := itacservices.ObjectUserCreateRequest{}
+	inArg.Metadata.Name = plan.Name.ValueString()
+
+	inArg.Spec = append(inArg.Spec, bucketPolicy...)
+
+	tflog.Info(ctx, "making a call to IDC Service for create object storage bucket user")
+	user, err := r.client.CreateObjectStorageUser(ctx, &inArg)
+	if err != nil {
+		resp.Diagnostics.AddError(
+			"Error creating object storage bucket user",
+			"Could not create object storage bucket user, unexpected error: "+err.Error(),
+		)
+		return
+	}
+
+	plan.Cloudaccount = types.StringValue(user.Metadata.Cloudaccount)
+	plan.ID = types.StringValue(user.Metadata.UserId)
+	plan.Status = types.StringValue(mapObjectUserStatus(user.Status.Phase))
+	plan.Name = types.StringValue(user.Metadata.Name)
+
+	creds := models.ObjectUserAccessModel{
+		AccessKey: types.StringValue(user.Status.Principal.Credentials.AccessKey),
+		SecretKey: types.StringValue(user.Status.Principal.Credentials.SecretKey),
+	}
+
+	plan.AccessInfo, diags = types.ObjectValueFrom(ctx, creds.AttributeTypes(), creds)
+	resp.Diagnostics.Append(diags...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	// Set state to fully populated data
+	diags = resp.State.Set(ctx, plan)
+	resp.Diagnostics.Append(diags...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+}
+
+// Read refreshes the Terraform state with the latest data.
+func (r *objectStorageUserResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + // Get current state + var state objectStorageUserResourceModel + diags := req.State.Get(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + // Get refreshed order value from IDC Service + user, err := r.client.GetObjectUserByUserId(ctx, state.ID.ValueString()) + if err != nil { + resp.Diagnostics.AddError( + "Error Reading IDC Object Bucket user resource", + "Could not read IDC Object Bucket user ID "+state.ID.ValueString()+": "+err.Error(), + ) + return + } + + state.ID = types.StringValue(user.Metadata.UserId) + state.Cloudaccount = types.StringValue(user.Metadata.Cloudaccount) + state.Name = types.StringValue(user.Metadata.Name) + state.Status = types.StringValue(mapObjectUserStatus(user.Status.Phase)) + + creds := models.ObjectUserAccessModel{ + AccessKey: types.StringValue(user.Status.Principal.Credentials.AccessKey), + SecretKey: types.StringValue(user.Status.Principal.Credentials.SecretKey), + } + + state.AccessInfo, diags = types.ObjectValueFrom(ctx, creds.AttributeTypes(), creds) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + // Set refreshed state + diags = resp.State.Set(ctx, state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } +} + +// Update updates the resource and sets the updated Terraform state on success. +func (r *objectStorageUserResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { +} + +func (r *objectStorageUserResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + // Retrieve import ID and save to id attribute + resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp) +} + +// Delete deletes the resource and removes the Terraform state on success. 
+func (r *objectStorageUserResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + // Get current state + var state objectStorageUserResourceModel + diags := req.State.Get(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + // Delete the order from IDC Services + err := r.client.DeleteObjectUserByResourceId(ctx, state.ID.ValueString()) + if err != nil { + resp.Diagnostics.AddError( + "Error Deleting IDC Object Storage Bucket user resource", + "Could not delete IDC Object Storage Bucket user resource ID "+state.ID.ValueString()+": "+err.Error(), + ) + return + } +} + +func mapObjectUserStatus(status string) string { + switch status { + case "ObjectUserReady": + return "ready" + case "ObjectUserFailed": + return "failed" + case "BucketProvisioning": + return "provisioning" + case "ObjectUserDeleting": + return "deleting" + case "ObjectUserDeleted": + return "deleted" + default: + return "unspecified" + } +} diff --git a/internal/provider/provider.go b/internal/provider/provider.go new file mode 100644 index 0000000..6065c14 --- /dev/null +++ b/internal/provider/provider.go @@ -0,0 +1,220 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package provider + +import ( + "context" + "os" + + "terraform-provider-intelcloud/pkg/itacservices" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/provider" + "github.com/hashicorp/terraform-plugin-framework/provider/schema" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ provider.Provider = &idcProvider{} +) + +// idcProviderModel maps provider schema data to a Go type. 
+type idcProviderModel struct { + Region types.String `tfsdk:"region"` + Cloudaccount types.String `tfsdk:"cloudaccount"` + APIToken types.String `tfsdk:"apitoken"` + ClientId types.String `tfsdk:"clientid"` + ClientSecret types.String `tfsdk:"clientsecret"` +} + +// New is a helper function to simplify provider server and testing implementation. +func New(version string) func() provider.Provider { + return func() provider.Provider { + return &idcProvider{ + version: version, + } + } +} + +// idcProvider is the provider implementation. +type idcProvider struct { + // version is set to the provider version on release, "dev" when the + // provider is built and ran locally, and "test" when running acceptance + // testing. + version string +} + +// Metadata returns the provider type name. +func (p *idcProvider) Metadata(_ context.Context, _ provider.MetadataRequest, resp *provider.MetadataResponse) { + resp.TypeName = "intelcloud" + resp.Version = p.version +} + +// Schema defines the provider-level schema for configuration data. +func (p *idcProvider) Schema(_ context.Context, _ provider.SchemaRequest, resp *provider.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "region": schema.StringAttribute{ + Optional: true, + }, + "cloudaccount": schema.StringAttribute{ + Optional: true, + }, + "apitoken": schema.StringAttribute{ + Optional: true, + }, + "clientid": schema.StringAttribute{ + Optional: true, + }, + "clientsecret": schema.StringAttribute{ + Optional: true, + }, + }, + } +} + +// Configure prepares a HashiCups API client for data sources and resources. +func (p *idcProvider) Configure(ctx context.Context, req provider.ConfigureRequest, resp *provider.ConfigureResponse) { + // Default values to environment variables, but override + // with Terraform configuration value if set. 
+ + region := os.Getenv("ITAC_REGION") + cloudaccount := os.Getenv("ITAC_CLOUDACCOUNT") + clientid := os.Getenv("ITAC_CLIENT_ID") + clientsecret := os.Getenv("ITAC_CLIENT_SECRET") + + // Retrieve provider data from configuration + var config idcProviderModel + diags := req.Config.Get(ctx, &config) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + if !config.Region.IsNull() { + region = config.Region.ValueString() + } + + if !config.Cloudaccount.IsNull() { + cloudaccount = config.Cloudaccount.ValueString() + } + + if !config.ClientId.IsNull() { + clientid = config.ClientId.ValueString() + } + + if !config.ClientSecret.IsNull() { + clientsecret = config.ClientSecret.ValueString() + } + + // If any of the expected configurations are missing, return + // errors with provider-specific guidance. + + if region == "" { + resp.Diagnostics.AddAttributeError( + path.Root("region"), + "Missing ITAC API Region", + "The provider cannot create the ITAC API client as there is a missing or empty value for the ITAC API region. "+ + "Set the region value in the configuration or use the ITAC_REGION environment variable. "+ + "If either is already set, ensure the value is not empty.", + ) + } + + if cloudaccount == "" { + resp.Diagnostics.AddAttributeError( + path.Root("cloudaccount"), + "Missing ITAC Cloudaccount", + "The provider cannot create the ITAC Cloudaccount as there is a missing or empty value for the ITAC Cloudaccount. "+ + "Set the host value in the configuration or use the ITAC_CLOUDACCOUNT environment variable. "+ + "If either is already set, ensure the value is not empty.", + ) + } + + if clientid == "" { + resp.Diagnostics.AddAttributeError( + path.Root("clientid"), + "Missing ITAC Client Id", + "The provider cannot create the ITAC Client Id as there is a missing or empty value for the ITAC client id. "+ + "Set the clientid value in the configuration or use the ITAC_CLIENT_ID environment variable. 
"+ + "If either is already set, ensure the value is not empty.", + ) + } + + if clientsecret == "" { + resp.Diagnostics.AddAttributeError( + path.Root("clientsecret"), + "Missing ITAC Client secret", + "The provider cannot create the ITAC client secret as there is a missing or empty value for the ITAC client secret "+ + "Set the clientsecret value in the configuration or use the ITAC_CLIENT_SECRET environment variable. "+ + "If either is already set, ensure the value is not empty.", + ) + } + + if resp.Diagnostics.HasError() { + return + } + + clientTokenEndpoint, serviceEndpoint := discoverITACServiceEndpoint(region) + + // Create a new HashiCups client using the configuration values + client, err := itacservices.NewClient(ctx, &serviceEndpoint, &clientTokenEndpoint, &cloudaccount, &clientid, &clientsecret, ®ion) + if err != nil { + resp.Diagnostics.AddError( + "Unable to Create ITAC API Client", + "An unexpected error occurred when creating the ITAC API client. "+ + "If the error is not clear, please contact the provider developers.\n\n"+ + "ITAC Client Error: "+err.Error(), + ) + return + } + + // Make the HashiCups client available during DataSource and Resource + // type Configure methods. + resp.DataSourceData = client + resp.ResourceData = client +} + +// DataSources defines the data sources implemented in the provider. +func (p *idcProvider) DataSources(_ context.Context) []func() datasource.DataSource { + return []func() datasource.DataSource{ + NewFilesystemsDataSource, + NewSSHKeysDataSource, + NewInstanceDataSource, + NewInstanceTypesDataSource, + NewMachineImagesDataSource, + NewKubernetesDataSource, + } +} + +// Resources defines the resources implemented in the provider. 
+func (p *idcProvider) Resources(_ context.Context) []func() resource.Resource {
+	return []func() resource.Resource{
+		NewFilesystemResource,
+		NewSSHKeyResource,
+		NewComputeInstanceResource,
+		NewIKSClusterResource,
+		NewIKSNodeGroupResource,
+		NewIKSLBResource,
+		NewObjectStorageResource,
+		NewObjectStorageUserResource,
+	}
+}
+
+func discoverITACServiceEndpoint(region string) (string, string) { // NOTE(review): returns (tokenEndpoint, serviceEndpoint); every region except "us-staging-1" yields an empty second value, so the client Host ends up "" at the Configure call site — confirm intended
+	switch region {
+	case "us-staging-1":
+		return "https://client-token.staging.api.idcservice.net", "https://us-staging-1-sdk-api.eglb.intel.com"
+	case "us-staging-3":
+		return "https://staging-idc-us-3.eglb.intel.com", ""
+	case "us-region-1":
+		return "https://compute-us-region-1-api.cloud.intel.com", ""
+	case "us-region-2":
+		return "https://compute-us-region-2-api.cloud.intel.com", ""
+	default:
+		return "", ""
+	}
+}
diff --git a/internal/provider/provider_test.go b/internal/provider/provider_test.go
new file mode 100644
index 0000000..ef6599b
--- /dev/null
+++ b/internal/provider/provider_test.go
@@ -0,0 +1,25 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package provider
+
+import (
+	"testing"
+
+	"github.com/hashicorp/terraform-plugin-framework/providerserver"
+	"github.com/hashicorp/terraform-plugin-go/tfprotov6"
+)
+
+// testAccProtoV6ProviderFactories are used to instantiate a provider during
+// acceptance testing. The factory function will be invoked for every Terraform
+// CLI command executed to create a provider server to which the CLI can
+// reattach.
+var testAccProtoV6ProviderFactories = map[string]func() (tfprotov6.ProviderServer, error){
+	"intelcloud": providerserver.NewProtocol6WithError(New("test")()), // key must match the TypeName set in idcProvider.Metadata ("scaffolding" was template leftover)
+}
+
+func testAccPreCheck(t *testing.T) {
+	// You can add code here to run prior to any test case execution, for example assertions
+	// about the appropriate environment variables being set are common to see in a pre-check
+	// function.
+} diff --git a/internal/provider/sshkey_data_source.go b/internal/provider/sshkey_data_source.go new file mode 100644 index 0000000..a56884e --- /dev/null +++ b/internal/provider/sshkey_data_source.go @@ -0,0 +1,148 @@ +package provider + +import ( + "context" + "fmt" + + "terraform-provider-intelcloud/pkg/itacservices" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func NewSSHKeysDataSource() datasource.DataSource { + return &sshkeysDataSource{} +} + +type sshkeysDataSource struct { + client *itacservices.IDCServicesClient +} + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &sshkeysDataSource{} + _ datasource.DataSourceWithConfigure = &sshkeysDataSource{} +) + +// storagesDataSourceModel maps the data source schema data. +type sshkeysDataSourceModel struct { + SSHKeys []sshkeyModel `tfsdk:"sshkeys"` +} + +// coffeesModel maps coffees schema data. +type sshkeyModel struct { + Metadata resourceMetadata `tfsdk:"metadata"` + Spec sshkeySpec `tfsdk:"spec"` +} + +type resourceMetadata struct { + ResourceId types.String `tfsdk:"resourceid"` + Cloudaccount types.String `tfsdk:"cloudaccount"` + Name types.String `tfsdk:"name"` + CreatedAt types.String `tfsdk:"createdat"` +} +type sshkeySpec struct { + SSHPublicKey types.String `tfsdk:"ssh_public_key"` + OwnerEmail types.String `tfsdk:"owner_email"` +} + +// Configure adds the provider configured client to the data source. +func (d *sshkeysDataSource) Configure(_ context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + + client, ok := req.ProviderData.(*itacservices.IDCServicesClient) + if !ok { + resp.Diagnostics.AddError( + "Unexpected Data Source Configure Type", + fmt.Sprintf("Expected *itacservices.IDCServicesClient, got: %T. 
Please report this issue to the provider developers.", req.ProviderData), + ) + + return + } + + d.client = client +} + +func (d *sshkeysDataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_sshkey" +} + +func (d *sshkeysDataSource) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "sshkeys": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "metadata": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "resourceid": schema.StringAttribute{ + Computed: true, + }, + "cloudaccount": schema.StringAttribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + "createdat": schema.StringAttribute{ + Computed: true, + }, + }, + }, + "spec": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "ssh_public_key": schema.StringAttribute{ + Computed: true, + }, + "owner_email": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + }, + }, + }, + } +} + +func (d *sshkeysDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + + var state sshkeysDataSourceModel + state.SSHKeys = []sshkeyModel{} + + sshkeyList, err := d.client.GetSSHKeys(ctx) + if err != nil { + resp.Diagnostics.AddError( + "Unable to Read IDC Compute SSHKeys", + err.Error(), + ) + return + } + for _, key := range sshkeyList.SSHKey { + sshkeyModel := sshkeyModel{ + Metadata: resourceMetadata{ + Cloudaccount: types.StringValue(key.Metadata.Cloudaccount), + Name: types.StringValue(key.Metadata.Name), + }, + Spec: sshkeySpec{ + SSHPublicKey: types.StringValue(key.Spec.SSHPublicKey), + OwnerEmail: types.StringValue(key.Spec.OwnerEmail), + }, + } + 
state.SSHKeys = append(state.SSHKeys, sshkeyModel) + } + + // Set state + diags := resp.State.Set(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } +} diff --git a/internal/provider/sshkey_resource.go b/internal/provider/sshkey_resource.go new file mode 100644 index 0000000..3013131 --- /dev/null +++ b/internal/provider/sshkey_resource.go @@ -0,0 +1,210 @@ +package provider + +import ( + "context" + "fmt" + "time" + + "terraform-provider-intelcloud/pkg/itacservices" + + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ resource.Resource = &sshKeyResource{} + _ resource.ResourceWithConfigure = &sshKeyResource{} +) + +// orderSSHKeyModel maps the resource schema data. +type sshKeyResourceModel struct { + Metadata resourceMetadata `tfsdk:"metadata"` + Spec sshkeySpec `tfsdk:"spec"` +} + +// NewOrderFilesystem is a helper function to simplify the provider implementation. +func NewSSHKeyResource() resource.Resource { + return &sshKeyResource{} +} + +// orderResource is the resource implementation. +type sshKeyResource struct { + client *itacservices.IDCServicesClient +} + +// Configure adds the provider configured client to the resource. +func (r *sshKeyResource) Configure(_ context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + + client, ok := req.ProviderData.(*itacservices.IDCServicesClient) + + if !ok { + resp.Diagnostics.AddError( + "Unexpected Data Source Configure Type", + fmt.Sprintf("Expected *itacservices.IDCServicesClient, got: %T. 
Please report this issue to the provider developers.", req.ProviderData), + ) + + return + } + + r.client = client +} + +// Metadata returns the resource type name. +func (r *sshKeyResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_sshkey" +} + +// Schema defines the schema for the resource. +func (r *sshKeyResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "metadata": schema.SingleNestedAttribute{ + Required: true, + Attributes: map[string]schema.Attribute{ + "resourceid": schema.StringAttribute{ + Computed: true, + }, + "cloudaccount": schema.StringAttribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Required: true, + }, + "createdat": schema.StringAttribute{ + Computed: true, + }, + }, + }, + "spec": schema.SingleNestedAttribute{ + Required: true, + Attributes: map[string]schema.Attribute{ + "ssh_public_key": schema.StringAttribute{ + Required: true, + }, + "owner_email": schema.StringAttribute{ + Required: true, + }, + }, + }, + }, + } +} + +// Create creates the resource and sets the initial Terraform state. +func (r *sshKeyResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + // Retrieve values from plan + var plan sshKeyResourceModel + + diags := req.Plan.Get(ctx, &plan) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + inArg := itacservices.SSHKeyCreateRequest{ + Metadata: struct { + Name string "json:\"name\"" + }{ + Name: plan.Metadata.Name.ValueString(), + }, + Spec: struct { + SSHPublicKey string "json:\"sshPublicKey\"" + OwnerEmail string "json:\"ownerEmail\"" + }{ + SSHPublicKey: plan.Spec.SSHPublicKey.ValueString(), + OwnerEmail: plan.Spec.OwnerEmail.ValueString(), + }, + } + tflog.Info(ctx, "making a call to IDC Service for create sshkey") + sshkeyCreateResp, err := r.client.CreateSSHkey(ctx, &inArg) + if err != nil { + resp.Diagnostics.AddError( + "Error creating order", + "Could not create order, unexpected error: "+err.Error(), + ) + return + } + + // Map response body to schema and populate Computed attribute values + plan.Metadata.CreatedAt = types.StringValue(time.Now().Format(time.RFC850)) + plan.Metadata.ResourceId = types.StringValue(sshkeyCreateResp.Metadata.ResourceId) + plan.Metadata.Cloudaccount = types.StringValue(sshkeyCreateResp.Metadata.Cloudaccount) + + // Set state to fully populated data + diags = resp.State.Set(ctx, plan) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + +} + +// Read refreshes the Terraform state with the latest data. +func (r *sshKeyResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + // Get current state + var state sshKeyResourceModel + diags := req.State.Get(ctx, &state) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + // Get refreshed order value from IDC Service + sshkey, err := r.client.GetSSHKeyByResourceId(ctx, state.Metadata.ResourceId.ValueString()) + if err != nil { + resp.Diagnostics.AddError( + "Error Reading IDC SSHKey resource", + "Could not read IDC SSHKey resource ID "+state.Metadata.ResourceId.ValueString()+": "+err.Error(), + ) + return + } + + state.Metadata = resourceMetadata{ + ResourceId: types.StringValue(sshkey.Metadata.ResourceId), + Cloudaccount: types.StringValue(sshkey.Metadata.Cloudaccount), + Name: types.StringValue(sshkey.Metadata.Name), + } + state.Spec = sshkeySpec{ + SSHPublicKey: types.StringValue(sshkey.Spec.SSHPublicKey), + OwnerEmail: types.StringValue(sshkey.Spec.OwnerEmail), + } + + // Set refreshed state + diags = resp.State.Set(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + +} + +// Update updates the resource and sets the updated Terraform state on success. +func (r *sshKeyResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { +} + +// Delete deletes the resource and removes the Terraform state on success. +func (r *sshKeyResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + // Get current state + var state sshKeyResourceModel + diags := req.State.Get(ctx, &state) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + // Delete the order from IDC Services + err := r.client.DeleteSSHKeyByResourceId(ctx, state.Metadata.ResourceId.ValueString()) + if err != nil { + resp.Diagnostics.AddError( + "Error Deleting IDC SSHKey resource", + "Could not delete IDC SSHKey resource ID "+state.Metadata.ResourceId.ValueString()+": "+err.Error(), + ) + return + } +} diff --git a/internal/provider/utils.go b/internal/provider/utils.go new file mode 100644 index 0000000..6b315e2 --- /dev/null +++ b/internal/provider/utils.go @@ -0,0 +1,5 @@ +package provider + +func remove(slice []interface{}, s int) []interface{} { + return append(slice[:s], slice[s+1:]...) +} diff --git a/main.go b/main.go new file mode 100644 index 0000000..9c9eaba --- /dev/null +++ b/main.go @@ -0,0 +1,56 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package main + +import ( + "context" + "flag" + "log" + + "terraform-provider-intelcloud/internal/provider" + + "github.com/hashicorp/terraform-plugin-framework/providerserver" +) + +// Run "go generate" to format example terraform files and generate the docs for the registry/website + +// If you do not have terraform installed, you can remove the formatting command, but its suggested to +// ensure the documentation is formatted properly. +//go:generate terraform fmt -recursive ./examples/ + +// Run the docs generation tool, check its repository for more information on how it works and how docs +// can be customized. +//go:generate go run github.com/hashicorp/terraform-plugin-docs/cmd/tfplugindocs generate -provider-name scaffolding + +var ( + // these will be set by the goreleaser configuration + // to appropriate values for the compiled binary. 
+ version string = "dev" + + // goreleaser can pass other information to the main package, such as the specific commit + // https://goreleaser.com/cookbooks/using-main.version/ +) + +func main() { + var debug bool + + flag.BoolVar(&debug, "debug", false, "set to true to run the provider with support for debuggers like delve") + flag.Parse() + + opts := providerserver.ServeOpts{ + // NOTE: This is not a typical Terraform Registry provider address, + // such as registry.terraform.io/hashicorp/hashicups. This specific + // provider address is used in these tutorials in conjunction with a + // specific Terraform CLI configuration for manual development testing + // of this provider. + Address: "cloud.intel.com/services/idc", + Debug: debug, + } + + err := providerserver.Serve(context.Background(), provider.New(version), opts) + + if err != nil { + log.Fatal(err.Error()) + } +} diff --git a/pkg/itacservices/client.go b/pkg/itacservices/client.go new file mode 100644 index 0000000..f990ecf --- /dev/null +++ b/pkg/itacservices/client.go @@ -0,0 +1,103 @@ +package itacservices + +import ( + "bytes" + "context" + b64 "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "os" + "terraform-provider-intelcloud/pkg/itacservices/common" + "time" + + "github.com/hashicorp/terraform-plugin-log/tflog" +) + +type IDCServicesClient struct { + Host *string + Cloudaccount *string + Apitoken *string + Region *string + Clientid *string + Clientsecret *string + ExpireAt time.Time +} + +var ( + getTokenURL = "{{.Host}}/oauth2/token" +) + +type TokenResponse struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + ExpiresIn int `json:"expires_in"` +} + +func NewClient(ctx context.Context, host, tokenSvc, cloudaccount, clientid, clientsecret, region *string) (*IDCServicesClient, error) { + os.Setenv("NO_PROXY", "") + os.Setenv("no_proxy", "") + + params := struct { + Host string + }{ + Host: *tokenSvc, + } + + // Parse the template 
string with the provided data + parsedURL, err := common.ParseString(getTokenURL, params) + if err != nil { + return nil, fmt.Errorf("error parsing the url") + } + + data := url.Values{} + data.Set("grant_type", "client_credentials") + data.Set("client_id", *clientid) + + req, err := http.NewRequest("POST", parsedURL, bytes.NewBufferString(data.Encode())) + if err != nil { + return nil, fmt.Errorf("error creating ITAC Token request") + } + + authStr := fmt.Sprintf("%s:%s", *clientid, *clientsecret) + authEncoded := fmt.Sprintf("Basic %s", b64.StdEncoding.EncodeToString([]byte(authStr))) + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + req.Header.Set("Accept", "application/json") + + req.Header.Set("Authorization", authEncoded) + client := &http.Client{Timeout: 60 * time.Second} + + tflog.Info(ctx, "making api client request", map[string]interface{}{"request": req.Header, "url": parsedURL, "auth": authEncoded}) + + resp, err := client.Do(req) + if err != nil { + tflog.Info(ctx, "error making api client request", map[string]interface{}{"error": err}) + return nil, fmt.Errorf("error creating ITAC Token request") + } + defer resp.Body.Close() + body, _ := io.ReadAll(resp.Body) + retcode := resp.StatusCode + tokenResp := TokenResponse{} + if retcode != http.StatusOK { + tflog.Info(ctx, "error making api client request", map[string]interface{}{"retcode": retcode, "body": string(body)}) + return nil, fmt.Errorf("error creating ITAC Token request") + } + + if err = json.Unmarshal(body, &tokenResp); err != nil { + tflog.Info(ctx, "error making api client request", map[string]interface{}{"error": err}) + return nil, fmt.Errorf("error creating ITAC Token request") + } + + tflog.Info(ctx, "Token Response", map[string]interface{}{"token": tokenResp.AccessToken, "expires_in": tokenResp.ExpiresIn}) + return &IDCServicesClient{ + Host: host, + Cloudaccount: cloudaccount, + Clientid: clientid, + Clientsecret: clientsecret, + Region: region, + Apitoken: 
&tokenResp.AccessToken, + ExpireAt: time.Now().Add(time.Duration(tokenResp.ExpiresIn) * time.Second), + }, nil +} diff --git a/pkg/itacservices/common/httpclient.go b/pkg/itacservices/common/httpclient.go new file mode 100644 index 0000000..47d2796 --- /dev/null +++ b/pkg/itacservices/common/httpclient.go @@ -0,0 +1,115 @@ +// INTEL CONFIDENTIAL +// Copyright (C) 2023 Intel Corporation +package common + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "net/http" + "time" +) + +// MakeGetAPICall : +func MakeGetAPICall(ctx context.Context, connURL, auth string, payload []byte) (int, []byte, error) { + // logger := log.FromContext(ctx).WithName("common.MakeGetAPICall") + // http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{InsecureSkipVerify: false} + req, err := http.NewRequest("GET", connURL, bytes.NewBuffer(payload)) + if err != nil { + return http.StatusInternalServerError, nil, err + } + req.Header.Set("Content-Type", "application/json") + if auth != "" { + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", auth)) + } + retries := 3 + body := []byte{} + retcode := http.StatusOK + for try := 1; try <= retries; try++ { + client := &http.Client{Timeout: 60 * time.Second} + resp, err := client.Do(req) + if err != nil { + if try == retries { + return http.StatusInternalServerError, nil, + errors.New("error conencting to api service") + } + time.Sleep(5 * time.Second) + continue + } + defer resp.Body.Close() + body, _ = io.ReadAll(resp.Body) + retcode = resp.StatusCode + break + } + // body = []byte(sampleFilesystemList) + return retcode, body, nil +} + +// MakePOSTAPICall : +func MakePOSTAPICall(ctx context.Context, connURL, auth string, payload []byte) (int, []byte, error) { + + // http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{InsecureSkipVerify: false} + req, err := http.NewRequest("POST", connURL, bytes.NewBuffer(payload)) + if err != nil { + return http.StatusInternalServerError, nil, err + } + 
req.Header.Set("Content-Type", "application/json") + req.Header.Set("Accept", "application/json") + if auth != "" { + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", auth)) + } + retries := 3 + body := []byte{} + retcode := http.StatusOK + for try := 1; try <= retries; try++ { + client := &http.Client{Timeout: 60 * time.Second} + resp, err := client.Do(req) + if err != nil { + if try == retries { + return http.StatusInternalServerError, nil, + errors.New("error conencting to api service") + } + time.Sleep(5 * time.Second) + continue + } + defer resp.Body.Close() + body, _ = io.ReadAll(resp.Body) + retcode = resp.StatusCode + break + } + return retcode, body, nil +} + +// MakeDeleteAPICall : +func MakeDeleteAPICall(ctx context.Context, connURL string, auth string, payload []byte) (int, []byte, error) { + req, err := http.NewRequest("DELETE", connURL, bytes.NewBuffer(payload)) + if err != nil { + return http.StatusInternalServerError, nil, err + } + req.Header.Set("Content-Type", "application/json") + if auth != "" { + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", auth)) + } + retries := 3 + body := []byte{} + retcode := http.StatusOK + for try := 1; try <= retries; try++ { + client := &http.Client{Timeout: 60 * time.Second} + resp, err := client.Do(req) + if err != nil { + if try == retries { + return http.StatusInternalServerError, nil, + errors.New("error conencting to api service") + } + time.Sleep(5 * time.Second) + continue + } + defer resp.Body.Close() + body, _ = io.ReadAll(resp.Body) + retcode = resp.StatusCode + break + } + return retcode, body, nil +} diff --git a/pkg/itacservices/common/utils.go b/pkg/itacservices/common/utils.go new file mode 100644 index 0000000..1f7cc43 --- /dev/null +++ b/pkg/itacservices/common/utils.go @@ -0,0 +1,37 @@ +package common + +import ( + "bytes" + "fmt" + "net/http" + "text/template" +) + +// ParseString parses the given template string with the provided data. 
+func ParseString(templateString string, data interface{}) (string, error) { + t, err := template.New("generic").Parse(templateString) + if err != nil { + return "", err + } + + var result bytes.Buffer + err = t.Execute(&result, data) + if err != nil { + return "", err + } + + return result.String(), nil +} + +func MapHttpError(code int) error { + switch code { + case http.StatusUnauthorized: + return fmt.Errorf("Unauthorized") + case http.StatusBadRequest: + return fmt.Errorf("Bad Request") + case http.StatusInternalServerError: + return fmt.Errorf("Internal Server Error") + default: + return fmt.Errorf("error calling API") + } +} diff --git a/pkg/itacservices/data_sources.go b/pkg/itacservices/data_sources.go new file mode 100644 index 0000000..d9ba39f --- /dev/null +++ b/pkg/itacservices/data_sources.go @@ -0,0 +1,100 @@ +package itacservices + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "terraform-provider-intelcloud/pkg/itacservices/common" + + "github.com/hashicorp/terraform-plugin-log/tflog" +) + +var ( + getAllMachineImagesURL = "{{.Host}}/v1/machineimages" + getAllInstanceTypesURL = "{{.Host}}/v1/instancetypes" +) + +type MachineImageResponse struct { + Items []struct { + Metadata struct { + Name string `json:"name"` + } `json:"metadata"` + Spec struct { + Description string `json:"description"` + InstanceCategories []string `json:"instanceCategories"` + InstanceTypes []string `json:"instanceTypes"` + } `json:"spec"` + Hidden bool `json:"hidden"` + } `json:"items"` +} + +type InstanceTypeResponse struct { + Items []struct { + Metadata struct { + Name string `json:"name"` + } `json:"metadata"` + Spec struct { + Description string `json:"description"` + InstanceCategory string `json:"instanceCategory"` + } `json:"spec"` + } `json:"items"` +} + +func (client *IDCServicesClient) GetMachineImages(ctx context.Context) (*MachineImageResponse, error) { + params := struct { + Host string + }{ + Host: *client.Host, + } + + // Parse the template 
string with the provided data + parsedURL, err := common.ParseString(getAllMachineImagesURL, params) + if err != nil { + return nil, fmt.Errorf("error parsing the url") + } + retcode, retval, err := common.MakeGetAPICall(ctx, parsedURL, *client.Apitoken, nil) + if err != nil { + tflog.Debug(ctx, "machine images api error", map[string]any{"retcode": retcode, "err": err, "token": *client.Apitoken}) + return nil, fmt.Errorf("error reading machine images") + } + tflog.Debug(ctx, "machine images api", map[string]any{"retcode": retcode, "retval": string(retval), "token": *client.Apitoken}) + if retcode != http.StatusOK { + return nil, common.MapHttpError(retcode) + } + + images := MachineImageResponse{} + if err := json.Unmarshal(retval, &images); err != nil { + return nil, fmt.Errorf("error parsing machine image response") + } + return &images, nil +} + +func (client *IDCServicesClient) GetInstanceTypes(ctx context.Context) (*InstanceTypeResponse, error) { + params := struct { + Host string + }{ + Host: *client.Host, + } + + // Parse the template string with the provided data + parsedURL, err := common.ParseString(getAllInstanceTypesURL, params) + if err != nil { + return nil, fmt.Errorf("error parsing the url") + } + + retcode, retval, err := common.MakeGetAPICall(ctx, parsedURL, *client.Apitoken, nil) + if err != nil { + return nil, fmt.Errorf("error reading machine images") + } + + if retcode != http.StatusOK { + return nil, common.MapHttpError(retcode) + } + instType := InstanceTypeResponse{} + if err := json.Unmarshal(retval, &instType); err != nil { + return nil, fmt.Errorf("error parsing machine image response") + } + + return &instType, nil +} diff --git a/pkg/itacservices/filesystems.go b/pkg/itacservices/filesystems.go new file mode 100644 index 0000000..b702935 --- /dev/null +++ b/pkg/itacservices/filesystems.go @@ -0,0 +1,287 @@ +package itacservices + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "time" + + 
"terraform-provider-intelcloud/pkg/itacservices/common" + + "github.com/hashicorp/terraform-plugin-log/tflog" + retry "github.com/sethvargo/go-retry" +) + +var ( + getAllFilesystemsURL = "{{.Host}}/v1/cloudaccounts/{{.Cloudaccount}}/filesystems?metadata.filterType=ComputeGeneral" + createFilesystemsURL = "{{.Host}}/v1/cloudaccounts/{{.Cloudaccount}}/filesystems" + getFilesystemByResourceId = "{{.Host}}/v1/cloudaccounts/{{.Cloudaccount}}/filesystems/id/{{.ResourceId}}" + deleteFilesystemByResourceId = "{{.Host}}/v1/cloudaccounts/{{.Cloudaccount}}/filesystems/id/{{.ResourceId}}" + getLoginCredentials = "{{.Host}}/v1/cloudaccounts/{{.Cloudaccount}}/filesystems/id/{{.ResourceId}}/user" +) + +type Filesystems struct { + FilesystemList []Filesystem `json:"items"` +} + +type Filesystem struct { + Metadata struct { + ResourceId string `json:"resourceId"` + Cloudaccount string `json:"cloudAccountId"` + Name string `json:"name"` + Description string `json:"description"` + CreatedAt string `json:"creationTimestamp"` + } `json:"metadata"` + Spec struct { + Request struct { + Size string `json:"storage"` + } `json:"request"` + StorageClass string `json:"storageClass"` + AccessMode string `json:"accessModes"` + FilesystemType string `json:"filesystemType"` + Encrypted bool `json:"Encrypted"` + AvailabilityZone string `json:"availabilityZone"` + } `json:"spec"` + Status struct { + Phase string `json:"phase"` + Mount struct { + ClusterAddr string `json:"clusterAddr"` + ClusterVersion string `json:"clusterVersion"` + Namespace string `json:"namespace"` + UserName string `json:"username"` + Password string `json:"password"` + FilesystemName string `json:"filesystemName"` + } `json:"mount"` + } `json:"status"` +} + +type LoginCreds struct { + User string `json:"user"` + Password string `json:"password"` +} + +type FilesystemCreateRequest struct { + Metadata struct { + Name string `json:"name"` + Description string `json:"description"` + } `json:"metadata"` + Spec struct { + Request 
struct { + Size string `json:"storage"` + } `json:"request"` + StorageClass string `json:"storageClass"` + AccessMode string `json:"accessModes"` + FilesystemType string `json:"filesystemType"` + InstanceType string `json:"instanceType"` + Encrypted bool `json:"Encrypted"` + AvailabilityZone string `json:"availabilityZone"` + } `json:"spec"` +} + +func (client *IDCServicesClient) GetFilesystems(ctx context.Context) (*Filesystems, error) { + params := struct { + Host string + Cloudaccount string + }{ + Host: *client.Host, + Cloudaccount: *client.Cloudaccount, + } + + // Parse the template string with the provided data + parsedURL, err := common.ParseString(getAllFilesystemsURL, params) + if err != nil { + return nil, fmt.Errorf("error parsing the url") + } + + retcode, retval, err := common.MakeGetAPICall(ctx, parsedURL, *client.Apitoken, nil) + tflog.Debug(ctx, "filesystem read api", map[string]any{"retcode": retcode, "retval": string(retval)}) + if err != nil { + return nil, fmt.Errorf("error reading filesystems") + } + + if retcode != http.StatusOK { + return nil, common.MapHttpError(retcode) + } + filesystems := Filesystems{} + if err := json.Unmarshal(retval, &filesystems); err != nil { + return nil, fmt.Errorf("error parsing filesystem response") + } + + var password *string + if len(filesystems.FilesystemList) != 0 { + // generate credentials. 
Single pair of credentials is used for all + // filesystems + // get login credentials + password, err = client.GenerateFilesystemLoginCredentials(ctx, filesystems.FilesystemList[0].Metadata.ResourceId) + if err != nil { + return nil, fmt.Errorf("error generating filesystem login credentials") + } + } + + for idx, _ := range filesystems.FilesystemList { + filesystems.FilesystemList[idx].Status.Mount.Password = *password + } + + return &filesystems, nil +} + +func (client *IDCServicesClient) GenerateFilesystemLoginCredentials(ctx context.Context, resourceId string) (*string, error) { + getLoginParams := struct { + Host string + Cloudaccount string + ResourceId string + }{ + Host: *client.Host, + Cloudaccount: *client.Cloudaccount, + ResourceId: resourceId, + } + // Parse the template string with the provided data + parsedURL, err := common.ParseString(getLoginCredentials, getLoginParams) + if err != nil { + return nil, fmt.Errorf("error parsing the url") + } + + retcode, retval, err := common.MakeGetAPICall(ctx, parsedURL, *client.Apitoken, nil) + if err != nil { + return nil, fmt.Errorf("error generating login credentials") + } + if retcode != http.StatusOK { + return nil, common.MapHttpError(retcode) + } + creds := LoginCreds{} + if err := json.Unmarshal(retval, &creds); err != nil { + return nil, fmt.Errorf("error parsing filesystem credentials response") + } + return &creds.Password, nil +} + +func (client *IDCServicesClient) CreateFilesystem(ctx context.Context, in *FilesystemCreateRequest) (*Filesystem, error) { + params := struct { + Host string + Cloudaccount string + }{ + Host: *client.Host, + Cloudaccount: *client.Cloudaccount, + } + + // Parse the template string with the provided data + parsedURL, err := common.ParseString(createFilesystemsURL, params) + if err != nil { + return nil, fmt.Errorf("error parsing the url") + } + + inArgs, err := json.MarshalIndent(in, "", " ") + if err != nil { + return nil, fmt.Errorf("error parsing input arguments") + } + 
+ tflog.Debug(ctx, "filesystem create api", map[string]any{"url": parsedURL, "inArgs": string(inArgs)}) + retcode, retval, err := common.MakePOSTAPICall(ctx, parsedURL, *client.Apitoken, inArgs) + tflog.Debug(ctx, "filesystem create api", map[string]any{"retcode": retcode, "retval": string(retval)}) + if err != nil { + return nil, fmt.Errorf("error reading filesystem create response") + } + if retcode != http.StatusOK { + return nil, common.MapHttpError(retcode) + } + + filesystem := &Filesystem{} + if err := json.Unmarshal(retval, filesystem); err != nil { + return nil, fmt.Errorf("error parsing filesystem response") + } + + backoffTimer := retry.NewConstant(5 * time.Second) + backoffTimer = retry.WithMaxDuration(300*time.Second, backoffTimer) + + if err := retry.Do(ctx, backoffTimer, func(_ context.Context) error { + filesystem, err = client.GetFilesystemByResourceId(ctx, filesystem.Metadata.ResourceId) + if err != nil { + return fmt.Errorf("error reading filesystem state") + } + if filesystem.Status.Phase == "FSReady" { + return nil + } else if filesystem.Status.Phase == "FSFailed" { + return fmt.Errorf("filesystem state failed") + } else { + return retry.RetryableError(fmt.Errorf("filesystem state not ready, retry again")) + } + }); err != nil { + return nil, fmt.Errorf("filesystem state not ready after maximum retries") + } + + password, err := client.GenerateFilesystemLoginCredentials(ctx, filesystem.Metadata.ResourceId) + if err != nil { + return nil, fmt.Errorf("error generating login credentials") + } + filesystem.Status.Mount.Password = *password + + return filesystem, nil +} + +func (client *IDCServicesClient) GetFilesystemByResourceId(ctx context.Context, resourceId string) (*Filesystem, error) { + params := struct { + Host string + Cloudaccount string + ResourceId string + }{ + Host: *client.Host, + Cloudaccount: *client.Cloudaccount, + ResourceId: resourceId, + } + + // Parse the template string with the provided data + parsedURL, err := 
common.ParseString(getFilesystemByResourceId, params) + if err != nil { + return nil, fmt.Errorf("error parsing the url") + } + + retcode, retval, err := common.MakeGetAPICall(ctx, parsedURL, *client.Apitoken, nil) + if err != nil { + return nil, fmt.Errorf("error reading filesystem by resource id") + } + + if retcode != http.StatusOK { + return nil, common.MapHttpError(retcode) + } + + tflog.Debug(ctx, "filesystem read api", map[string]any{"retcode": retcode}) + filesystem := Filesystem{} + if err := json.Unmarshal(retval, &filesystem); err != nil { + return nil, fmt.Errorf("error parsing filesystem response") + } + return &filesystem, nil +} + +func (client *IDCServicesClient) DeleteFilesystemByResourceId(ctx context.Context, resourceId string) error { + params := struct { + Host string + Cloudaccount string + ResourceId string + }{ + Host: *client.Host, + Cloudaccount: *client.Cloudaccount, + ResourceId: resourceId, + } + + // Parse the template string with the provided data + parsedURL, err := common.ParseString(deleteFilesystemByResourceId, params) + if err != nil { + return fmt.Errorf("error parsing the url") + } + + retcode, retval, err := common.MakeDeleteAPICall(ctx, parsedURL, *client.Apitoken, nil) + if err != nil { + return fmt.Errorf("error deleting filesystem by resource id") + } + + tflog.Debug(ctx, "filesystem delete api", map[string]any{"retcode": retcode, "retval": string(retval)}) + + if retcode != http.StatusOK { + return common.MapHttpError(retcode) + } + + tflog.Debug(ctx, "filesystem delete api", map[string]any{"retcode": retcode}) + + return nil +} diff --git a/pkg/itacservices/instances.go b/pkg/itacservices/instances.go new file mode 100644 index 0000000..2c847e3 --- /dev/null +++ b/pkg/itacservices/instances.go @@ -0,0 +1,356 @@ +package itacservices + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "time" + + "terraform-provider-intelcloud/pkg/itacservices/common" + + "github.com/hashicorp/terraform-plugin-log/tflog" + 
retry "github.com/sethvargo/go-retry" +) + +var ( + getAllInstancesByAccount = "{{.Host}}/v1/cloudaccounts/{{.Cloudaccount}}/instances" + createInstance = "{{.Host}}/v1/cloudaccounts/{{.Cloudaccount}}/instances" + getInstanceByResourceId = "{{.Host}}/v1/cloudaccounts/{{.Cloudaccount}}/instances/id/{{.ResourceId}}" + deleteInstanceByResourceId = "{{.Host}}/v1/cloudaccounts/{{.Cloudaccount}}/instances/id/{{.ResourceId}}" + + getAllVNetsByAccount = "{{.Host}}/v1/cloudaccounts/{{.Cloudaccount}}/vnets" + createVNetByAccount = "{{.Host}}/v1/cloudaccounts/{{.Cloudaccount}}/vnets" +) + +type Instances struct { + Instances []Instance `json:"items"` +} + +type Instance struct { + Metadata struct { + ResourceId string `json:"resourceId"` + Cloudaccount string `json:"cloudAccountId"` + Name string `json:"name"` + CreatedAt string `json:"creationTimestamp"` + } `json:"metadata"` + Spec struct { + AvailabilityZone string `json:"availabilityZone"` + InstanceGroup string `json:"instanceGroup,omitempty"` + InstanceType string `json:"instanceType"` + Interfaces []struct { + Name string `json:"name"` + VNet string `json:"vnet"` + } `json:"interfaces"` + MachineImage string `json:"machineImage"` + SshPublicKeyNames []string `json:"sshPublicKeyNames"` + UserData string `json:"userData,omitempty"` + } `json:"spec"` + Status struct { + Interfaces []struct { + Addresses []string `json:"addresses"` + DNSName string `json:"dnsName"` + Gateway string `json:"gateway"` + Name string `json:"name"` + PrefixLength int64 `json:"prefixLength"` + Subnet string `json:"subnet"` + VNet string `json:"vNet"` + } `json:"interfaces"` + Message string `json:"message"` + Phase string `json:"phase"` + SSHProxy struct { + Address string `json:"proxyAddress"` + Port int64 `json:"proxyPort"` + User string `json:"proxyUser"` + } `json:"sshProxy"` + UserName string `json:"userName"` + } +} + +type InstanceCreateRequest struct { + Metadata struct { + Name string `json:"name"` + } `json:"metadata"` + Spec struct { + 
AvailabilityZone string `json:"availabilityZone"` + InstanceGroup string `json:"instanceGroup,omitempty"` + InstanceType string `json:"instanceType"` + Interfaces []struct { + Name string `json:"name"` + VNet string `json:"vNet"` + } `json:"interfaces"` + MachineImage string `json:"machineImage"` + SshPublicKeyNames []string `json:"sshPublicKeyNames"` + UserData string `json:"userData,omitempty"` + } `json:"spec"` +} + +type VNets struct { + Vnets []VNet `json:"items"` +} + +type VNet struct { + Metadata struct { + ResourceId string `json:"resourceId"` + Cloudaccount string `json:"cloudAccountId"` + Name string `json:"name"` + } `json:"metadata"` + Spec struct { + AvailabilityZone string `json:"availabilityZone"` + Region string `json:"region"` + PrefixLength int64 `json:"prefixLength"` + } +} + +type VNetCreateRequest struct { + Metadata struct { + Name string `json:"name"` + } `json:"metadata"` + Spec struct { + AvailabilityZone string `json:"availabilityZone"` + Region string `json:"region"` + PrefixLength int64 `json:"prefixLength"` + } +} + +func (client *IDCServicesClient) GetInstances(ctx context.Context) (*Instances, error) { + params := struct { + Host string + Cloudaccount string + }{ + Host: *client.Host, + Cloudaccount: *client.Cloudaccount, + } + + // Parse the template string with the provided data + parsedURL, err := common.ParseString(getAllInstancesByAccount, params) + if err != nil { + return nil, fmt.Errorf("error parsing the url") + } + + retcode, retval, err := common.MakeGetAPICall(ctx, parsedURL, *client.Apitoken, nil) + tflog.Debug(ctx, "instances read api", map[string]any{"retcode": retcode, "retval": string(retval)}) + if err != nil { + return nil, fmt.Errorf("error reading instances") + } + + if retcode != http.StatusOK { + return nil, common.MapHttpError(retcode) + } + + instances := Instances{} + if err := json.Unmarshal(retval, &instances); err != nil { + return nil, fmt.Errorf("error parsing instances get response, %v", err) + } + 
return &instances, nil +} + +func (client *IDCServicesClient) CreateInstance(ctx context.Context, in *InstanceCreateRequest, async bool) (*Instance, error) { + params := struct { + Host string + Cloudaccount string + }{ + Host: *client.Host, + Cloudaccount: *client.Cloudaccount, + } + + // Parse the template string with the provided data + parsedURL, err := common.ParseString(createInstance, params) + if err != nil { + return nil, fmt.Errorf("error parsing the url") + } + + inArgs, err := json.MarshalIndent(in, "", " ") + if err != nil { + return nil, fmt.Errorf("error parsing input arguments") + } + + tflog.Debug(ctx, "instance create api request", map[string]any{"url": parsedURL, "inArgs": string(inArgs)}) + retcode, retval, err := common.MakePOSTAPICall(ctx, parsedURL, *client.Apitoken, inArgs) + + if err != nil { + return nil, fmt.Errorf("error reading instance create response") + } + tflog.Debug(ctx, "instance create api response", map[string]any{"retcode": retcode, "retval": string(retval)}) + + if retcode != http.StatusOK { + return nil, common.MapHttpError(retcode) + } + + instance := &Instance{} + if err := json.Unmarshal(retval, instance); err != nil { + return nil, fmt.Errorf("error parsing instance response") + } + + if async { + instance, err = client.GetInstanceByResourceId(ctx, instance.Metadata.ResourceId) + if err != nil { + return instance, fmt.Errorf("error reading instance state") + } + } else { + backoffTimer := retry.NewConstant(5 * time.Second) + backoffTimer = retry.WithMaxDuration(300*time.Second, backoffTimer) + + if err := retry.Do(ctx, backoffTimer, func(_ context.Context) error { + instance, err = client.GetInstanceByResourceId(ctx, instance.Metadata.ResourceId) + if err != nil { + return fmt.Errorf("error reading instance state") + } + if instance.Status.Phase == "Ready" { + return nil + } else if instance.Status.Phase == "Failed" { + return fmt.Errorf("instance state failed") + } else { + return 
retry.RetryableError(fmt.Errorf("instance state not ready, retry again")) + } + }); err != nil { + return nil, fmt.Errorf("instance state not ready after maximum retries") + } + } + return instance, nil +} + +func (client *IDCServicesClient) GetInstanceByResourceId(ctx context.Context, resourceId string) (*Instance, error) { + params := struct { + Host string + Cloudaccount string + ResourceId string + }{ + Host: *client.Host, + Cloudaccount: *client.Cloudaccount, + ResourceId: resourceId, + } + + // Parse the template string with the provided data + parsedURL, err := common.ParseString(getInstanceByResourceId, params) + if err != nil { + return nil, fmt.Errorf("error parsing the url") + } + + retcode, retval, err := common.MakeGetAPICall(ctx, parsedURL, *client.Apitoken, nil) + if err != nil { + return nil, fmt.Errorf("error reading sshkey by resource id") + } + + if retcode != http.StatusOK { + return nil, common.MapHttpError(retcode) + } + + tflog.Debug(ctx, "get instance api", map[string]any{"retcode": retcode}) + instance := Instance{} + if err := json.Unmarshal(retval, &instance); err != nil { + return nil, fmt.Errorf("error parsing get instance response") + } + return &instance, nil +} + +func (client *IDCServicesClient) DeleteInstanceByResourceId(ctx context.Context, resourceId string) error { + params := struct { + Host string + Cloudaccount string + ResourceId string + }{ + Host: *client.Host, + Cloudaccount: *client.Cloudaccount, + ResourceId: resourceId, + } + + // Parse the template string with the provided data + parsedURL, err := common.ParseString(deleteInstanceByResourceId, params) + if err != nil { + return fmt.Errorf("error parsing the url") + } + + retcode, _, err := common.MakeDeleteAPICall(ctx, parsedURL, *client.Apitoken, nil) + if err != nil { + return fmt.Errorf("error deleting sshkey by resource id") + } + + if retcode != http.StatusOK { + return common.MapHttpError(retcode) + } + + tflog.Debug(ctx, "instance delete api", 
map[string]any{"retcode": retcode}) + + return nil +} + +func (client *IDCServicesClient) CreateVNetIfNotFound(ctx context.Context) (*VNet, error) { + + params := struct { + Host string + Cloudaccount string + }{ + Host: *client.Host, + Cloudaccount: *client.Cloudaccount, + } + + // Parse the template string with the provided data + parsedURL, err := common.ParseString(getAllVNetsByAccount, params) + if err != nil { + return nil, fmt.Errorf("error parsing the url") + } + tflog.Debug(ctx, "vnets get api request", map[string]any{"url": parsedURL}) + + retcode, retval, err := common.MakeGetAPICall(ctx, parsedURL, *client.Apitoken, nil) + + if err != nil || retcode != http.StatusOK { + tflog.Debug(ctx, "vnet get response", map[string]any{"retcode": retcode, "error": err}) + return nil, fmt.Errorf("error reading vnets get response") + } + + vnets := VNets{} + if err := json.Unmarshal(retval, &vnets); err != nil { + return nil, fmt.Errorf("error parsing instance response") + } + tflog.Debug(ctx, "vnets get api response", map[string]any{"retcode": retcode, "retval": vnets}) + + if len(vnets.Vnets) > 0 { + return &(vnets.Vnets[0]), nil + } + + tflog.Debug(ctx, "vnets not found, creating a new") + + inArgs := VNetCreateRequest{ + Metadata: struct { + Name string "json:\"name\"" + }{ + Name: "us-staging-1a-default", + }, + Spec: struct { + AvailabilityZone string "json:\"availabilityZone\"" + Region string "json:\"region\"" + PrefixLength int64 "json:\"prefixLength\"" + }{ + AvailabilityZone: "us-staging-1a", + Region: "us-staging-1", + PrefixLength: 24, + }, + } + + payload, err := json.MarshalIndent(inArgs, "", " ") + if err != nil { + return nil, fmt.Errorf("error parsing input arguments") + } + + // Parse the template string with the provided data + parsedURL, err = common.ParseString(createVNetByAccount, params) + if err != nil { + return nil, fmt.Errorf("error parsing the url") + } + + retcode, retval, err = common.MakePOSTAPICall(ctx, parsedURL, *client.Apitoken, 
// URL templates for the IKS (managed Kubernetes) service, rendered via
// common.ParseString.
var (
	getAllK8sClustersURL       = "{{.Host}}/v1/cloudaccounts/{{.Cloudaccount}}/iks/clusters"
	createK8sClusterURL        = "{{.Host}}/v1/cloudaccounts/{{.Cloudaccount}}/iks/clusters"
	getIksClusterByClusterUUID = "{{.Host}}/v1/cloudaccounts/{{.Cloudaccount}}/iks/clusters/{{.ClusterUUID}}"
	deleteIksCluster           = "{{.Host}}/v1/cloudaccounts/{{.Cloudaccount}}/iks/clusters/{{.ClusterUUID}}"

	createK8sNodeGroupURL = "{{.Host}}/v1/cloudaccounts/{{.Cloudaccount}}/iks/clusters/{{.ClusterUUID}}/nodegroups"
	getK8sNodeGroupURL    = "{{.Host}}/v1/cloudaccounts/{{.Cloudaccount}}/iks/clusters/{{.ClusterUUID}}/nodegroups/{{.NodeGroupUUID}}"

	createK8sFileStorageURL = "{{.Host}}/v1/cloudaccounts/{{.Cloudaccount}}/iks/clusters/{{.ClusterUUID}}/storage"
	createIKSLBURL          = "{{.Host}}/v1/cloudaccounts/{{.Cloudaccount}}/iks/clusters/{{.ClusterUUID}}/vips"
	getIKSLBURLByCluster    = "{{.Host}}/v1/cloudaccounts/{{.Cloudaccount}}/iks/clusters/{{.ClusterUUID}}/vips"
	getIKSLBURLByID         = "{{.Host}}/v1/cloudaccounts/{{.Cloudaccount}}/iks/clusters/{{.ClusterUUID}}/vips/{{.VipID}}"
)

// IKSClusters is the list envelope returned by the IKS clusters API.
type IKSClusters struct {
	Clusters []IKSCluster `json:"clusters"`
}

// IKSCluster mirrors one managed Kubernetes cluster as returned by the API,
// including its node groups, storage, and load-balancer VIPs.
type IKSCluster struct {
	ResourceId            string         `json:"uuid"`
	Name                  string         `json:"name"`
	Description           string         `json:"description"`
	CreatedAt             string         `json:"createddate"`
	ClusterState          string         `json:"clusterstate"`
	K8sVersion            string         `json:"k8sversion"`
	UpgradeAvailable      bool           `json:"upgradeavailable"`
	UpgradableK8sVersions []string       `json:"upgradek8sversionavailable"`
	Network               ClusterNetwork `json:"network"`
	NodeGroups            []NodeGroup    `json:"nodegroups"`
	StorageEnabled        bool           `json:"storageenabled"`
	Storages              []K8sStorage   `json:"storages"`
	VIPs                  []IKSVIP       `json:"vips"`
}

// IKSVIP is one load-balancer virtual IP attached to a cluster.
type IKSVIP struct {
	Id       int64  `json:"vipid"`
	Name     string `json:"name"`
	State    string `json:"vipstate"`
	IP       string `json:"vipIp"`
	Port     int64  `json:"port"`
	PoolPort int64  `json:"poolport"`
	Type     string `json:"viptype"`
}

// ClusterNetwork describes a cluster's service/pod networking configuration.
type ClusterNetwork struct {
	EnableLB bool `json:"enableloadbalancer"`
	// NOTE(review): field name is misspelled ("Servcie"); the JSON tag is
	// correct, so the wire format is unaffected. Renaming would break any
	// external references — flag for a follow-up.
	ServcieCIDR string `json:"servicecidr"`
	ClusterCIDR string `json:"clustercidr"`
	ClusterDNS  string `json:"clusterdns"`
}

// NodeGroup is one worker-node pool belonging to a cluster.
type NodeGroup struct {
	ID                   string `json:"nodegroupuuid"`
	Name                 string `json:"name"`
	Count                int64  `json:"count"`
	InstanceType         string `json:"instancetypeid"`
	State                string `json:"nodegroupstate"`
	SSHKeyNames          []SKey `json:"sshkeyname"`
	NetworkInterfaceName string `json:"networkinterfacename"`
	IMIID                string `json:"imiid"`
	UserDataURL          string `json:"userdataurl"`
}

// SKey wraps a single SSH key name as the IKS API represents it.
type SKey struct {
	Name string `json:"sshkey"`
}

// K8sStorage describes a storage attachment on a cluster.
type K8sStorage struct {
	Provider string `json:"storageprovider"`
	Size     string `json:"size"`
	State    string `json:"state"`
}

// IKSNodeGroupCreateRequest is the request body for adding a node group to a
// cluster.
// NOTE(review): both ProductType (tag "instanceType") and InstanceTypeId
// (tag "instancetypeid") are sent — presumably the API accepts either form;
// confirm against the IKS API schema.
type IKSNodeGroupCreateRequest struct {
	Count          int64  `json:"count"`
	Name           string `json:"name"`
	ProductType    string `json:"instanceType"`
	InstanceTypeId string `json:"instancetypeid"`
	SSHKeyNames    []SKey `json:"sshkeyname"`
	UserDataURL    string `json:"userdataurl"`
	Interfaces     []struct {
		AvailabilityZone string `json:"availabilityzonename"`
		VNet             string `json:"networkinterfacevnetname"`
	} `json:"vnets"`
}
IKSCreateRequest struct { + Name string `json:"name"` + Count int64 `json:"count"` + K8sVersion string `json:"k8sversionname"` + InstanceType string `json:"instanceType"` + RuntimeName string `json:"runtimename"` +} + +type IKSStorageCreateRequest struct { + Enable bool `json:"enablestorage"` + Size string `json:"storagesize"` +} + +type IKSLoadBalancerRequest struct { + Name string `json:"name"` + Port int `json:"port"` + VIPType string `json:"viptype"` +} + +type IKSLoadBalancer struct { + ID int64 `json:"vipid"` + Name string `json:"name"` + Port int `json:"port"` + VIPType string `json:"viptype"` + VIPState string `json:"vipstate"` + VIPIP string `json:"vipip"` + PoolPort int `json:"poolport"` +} + +type IKSLBsByCluster struct { + Items []IKSLoadBalancer `json:"response"` +} + +func (client *IDCServicesClient) GetKubernetesClusters(ctx context.Context) (*IKSClusters, *string, error) { + params := struct { + Host string + Cloudaccount string + }{ + Host: *client.Host, + Cloudaccount: *client.Cloudaccount, + } + + // Parse the template string with the provided data + parsedURL, err := common.ParseString(getAllK8sClustersURL, params) + if err != nil { + return nil, nil, fmt.Errorf("error parsing the url") + } + + retcode, retval, err := common.MakeGetAPICall(ctx, parsedURL, *client.Apitoken, nil) + tflog.Debug(ctx, "iks read api", map[string]any{"retcode": retcode, "retval": string(retval)}) + if err != nil { + return nil, nil, fmt.Errorf("error reading iks clusters") + } + + if retcode != http.StatusOK { + return nil, nil, common.MapHttpError(retcode) + } + clusters := IKSClusters{} + if err := json.Unmarshal(retval, &clusters); err != nil { + tflog.Debug(ctx, "iks read api", map[string]any{"err": err}) + return nil, nil, fmt.Errorf("error parsing iks cluster response") + } + + return &clusters, client.Cloudaccount, nil +} + +func (client *IDCServicesClient) CreateIKSCluster(ctx context.Context, in *IKSCreateRequest, async bool) (*IKSCluster, *string, error) { + 
params := struct { + Host string + Cloudaccount string + }{ + Host: *client.Host, + Cloudaccount: *client.Cloudaccount, + } + + // Parse the template string with the provided data + parsedURL, err := common.ParseString(createK8sClusterURL, params) + if err != nil { + return nil, nil, fmt.Errorf("error parsing the url") + } + + inArgs, err := json.MarshalIndent(in, "", " ") + if err != nil { + return nil, nil, fmt.Errorf("error parsing input arguments") + } + + tflog.Debug(ctx, "iks create api request", map[string]any{"url": parsedURL, "inArgs": string(inArgs)}) + retcode, retval, err := common.MakePOSTAPICall(ctx, parsedURL, *client.Apitoken, inArgs) + + if err != nil { + return nil, nil, fmt.Errorf("error reading iks create response") + } + tflog.Debug(ctx, "iks create api response", map[string]any{"retcode": retcode, "retval": string(retval)}) + + if retcode != http.StatusOK { + return nil, nil, common.MapHttpError(retcode) + } + + cluster := &IKSCluster{} + if err := json.Unmarshal(retval, cluster); err != nil { + return nil, nil, fmt.Errorf("error parsing iks cluster response") + } + + if async { + cluster, _, err = client.GetIKSClusterByClusterUUID(ctx, cluster.ResourceId) + if err != nil { + return cluster, nil, fmt.Errorf("error reading iks cluster state") + } + } else { + backoffTimer := retry.NewConstant(5 * time.Second) + backoffTimer = retry.WithMaxDuration(1800*time.Second, backoffTimer) + + if err := retry.Do(ctx, backoffTimer, func(_ context.Context) error { + cluster, _, err = client.GetIKSClusterByClusterUUID(ctx, cluster.ResourceId) + if err != nil { + return fmt.Errorf("error reading iks cluster state") + } + if cluster.ClusterState == "Active" { + return nil + } else if cluster.ClusterState == "Failed" { + return fmt.Errorf("iks cluster state failed") + } else { + return retry.RetryableError(fmt.Errorf("iks cluster state not ready, retry again")) + } + }); err != nil { + return nil, nil, fmt.Errorf("iks cluster state not ready after maximum retries") + } 
+ } + + return cluster, client.Cloudaccount, nil +} + +func (client *IDCServicesClient) GetIKSClusterByClusterUUID(ctx context.Context, clusterUUID string) (*IKSCluster, *string, error) { + params := struct { + Host string + Cloudaccount string + ClusterUUID string + }{ + Host: *client.Host, + Cloudaccount: *client.Cloudaccount, + ClusterUUID: clusterUUID, + } + + // Parse the template string with the provided data + parsedURL, err := common.ParseString(getIksClusterByClusterUUID, params) + if err != nil { + return nil, nil, fmt.Errorf("error parsing the url") + } + + retcode, retval, err := common.MakeGetAPICall(ctx, parsedURL, *client.Apitoken, nil) + if err != nil { + return nil, nil, fmt.Errorf("error reading iks cluster by resource id") + } + tflog.Debug(ctx, "iks create api response", map[string]any{"retcode": retcode, "retval": string(retval)}) + + if retcode != http.StatusOK { + return nil, nil, common.MapHttpError(retcode) + } + + cluster := IKSCluster{} + if err := json.Unmarshal(retval, &cluster); err != nil { + return nil, nil, fmt.Errorf("error parsing iks cluster get response") + } + return &cluster, client.Cloudaccount, nil +} + +func (client *IDCServicesClient) DeleteIKSCluster(ctx context.Context, clusterUUID string) error { + params := struct { + Host string + Cloudaccount string + ClusterUUID string + }{ + Host: *client.Host, + Cloudaccount: *client.Cloudaccount, + ClusterUUID: clusterUUID, + } + + // Parse the template string with the provided data + parsedURL, err := common.ParseString(deleteIksCluster, params) + if err != nil { + return fmt.Errorf("error parsing the url") + } + + tflog.Debug(ctx, "iks cluster delete api", map[string]any{"parsedurl": parsedURL}) + retcode, _, err := common.MakeDeleteAPICall(ctx, parsedURL, *client.Apitoken, nil) + if err != nil { + return fmt.Errorf("error deleting iks cluster by resource id") + } + + if retcode != http.StatusOK { + return common.MapHttpError(retcode) + } + + tflog.Debug(ctx, "iks cluster delete api", 
map[string]any{"retcode": retcode}) + + return nil +} + +func (client *IDCServicesClient) CreateIKSNodeGroup(ctx context.Context, in *IKSNodeGroupCreateRequest, clusterUUID string, async bool) (*NodeGroup, *string, error) { + params := struct { + Host string + Cloudaccount string + ClusterUUID string + }{ + Host: *client.Host, + Cloudaccount: *client.Cloudaccount, + ClusterUUID: clusterUUID, + } + + // Parse the template string with the provided data + parsedURL, err := common.ParseString(createK8sNodeGroupURL, params) + if err != nil { + return nil, nil, fmt.Errorf("error parsing the url") + } + + inArgs, err := json.MarshalIndent(in, "", " ") + if err != nil { + return nil, nil, fmt.Errorf("error parsing input arguments") + } + + tflog.Debug(ctx, "iks node group create api request", map[string]any{"url": parsedURL, "inArgs": string(inArgs)}) + retcode, retval, err := common.MakePOSTAPICall(ctx, parsedURL, *client.Apitoken, inArgs) + + if err != nil { + return nil, nil, fmt.Errorf("error reading iks node group create response") + } + tflog.Debug(ctx, "iks node group create api response", map[string]any{"retcode": retcode, "retval": string(retval)}) + + if retcode != http.StatusOK { + return nil, nil, common.MapHttpError(retcode) + } + + ng := &NodeGroup{} + if err := json.Unmarshal(retval, ng); err != nil { + return nil, nil, fmt.Errorf("error parsing node group response") + } + + backoffTimer := retry.NewConstant(5 * time.Second) + backoffTimer = retry.WithMaxDuration(3000*time.Second, backoffTimer) + + if err := retry.Do(ctx, backoffTimer, func(_ context.Context) error { + ng, _, err = client.GetIKSNodeGroupByID(ctx, clusterUUID, ng.ID) + if err != nil { + return fmt.Errorf("error reading node group state") + } + tflog.Debug(ctx, "iks node group create api response", map[string]any{"nodegroupuuid": ng.ID, "state": ng.State}) + if ng.State == "Active" { + return nil + } else if ng.State == "Failed" { + return fmt.Errorf("node group state failed") + } + return 
retry.RetryableError(fmt.Errorf("iks node group state not ready, retry again")) + }); err != nil { + return nil, nil, fmt.Errorf("iks node group state not ready after maximum retries") + } + return ng, client.Cloudaccount, nil +} + +func (client *IDCServicesClient) GetIKSNodeGroupByID(ctx context.Context, clusterId, ngId string) (*NodeGroup, *string, error) { + params := struct { + Host string + Cloudaccount string + ClusterUUID string + NodeGroupUUID string + }{ + Host: *client.Host, + Cloudaccount: *client.Cloudaccount, + ClusterUUID: clusterId, + NodeGroupUUID: ngId, + } + + // Parse the template string with the provided data + parsedURL, err := common.ParseString(getK8sNodeGroupURL, params) + if err != nil { + return nil, nil, fmt.Errorf("error parsing the url") + } + + retcode, retval, err := common.MakeGetAPICall(ctx, parsedURL, *client.Apitoken, nil) + if err != nil { + return nil, nil, fmt.Errorf("error reading node group resource by id") + } + tflog.Debug(ctx, "iks node group read response", map[string]any{"retcode": retcode, "retval": string(retval)}) + + if retcode != http.StatusOK { + return nil, nil, common.MapHttpError(retcode) + } + + nodeGroup := NodeGroup{} + if err := json.Unmarshal(retval, &nodeGroup); err != nil { + return nil, nil, fmt.Errorf("error parsing iks cluster get response") + } + return &nodeGroup, client.Cloudaccount, nil +} + +func (client *IDCServicesClient) CreateIKSStorage(ctx context.Context, in *IKSStorageCreateRequest, clusterUUID string) (*K8sStorage, *string, error) { + params := struct { + Host string + Cloudaccount string + ClusterUUID string + }{ + Host: *client.Host, + Cloudaccount: *client.Cloudaccount, + ClusterUUID: clusterUUID, + } + + // Parse the template string with the provided data + parsedURL, err := common.ParseString(createK8sFileStorageURL, params) + if err != nil { + return nil, nil, fmt.Errorf("error parsing the url") + } + + inArgs, err := json.MarshalIndent(in, "", " ") + if err != nil { + return nil, 
nil, fmt.Errorf("error parsing input arguments") + } + + tflog.Debug(ctx, "iks file storage create api request", map[string]any{"url": parsedURL, "inArgs": string(inArgs)}) + retcode, retval, err := common.MakePOSTAPICall(ctx, parsedURL, *client.Apitoken, inArgs) + + if err != nil { + return nil, nil, fmt.Errorf("error reading iks file storage create response") + } + tflog.Debug(ctx, "iks file storage create api response", map[string]any{"retcode": retcode, "retval": string(retval)}) + + if retcode != http.StatusOK { + return nil, nil, common.MapHttpError(retcode) + } + + storage := &K8sStorage{} + if err := json.Unmarshal(retval, storage); err != nil { + return nil, nil, fmt.Errorf("error parsing storage response") + } + + backoffTimer := retry.NewConstant(5 * time.Second) + backoffTimer = retry.WithMaxDuration(3000*time.Second, backoffTimer) + + if err := retry.Do(ctx, backoffTimer, func(_ context.Context) error { + iksCluster, _, err := client.GetIKSClusterByClusterUUID(ctx, clusterUUID) + if err != nil { + return fmt.Errorf("error reading iks cluster state") + } + for _, v := range iksCluster.Storages { + if strings.EqualFold(v.Size, storage.Size) { + if v.State == "Active" { + storage.Provider = v.Provider + storage.State = v.State + return nil + } else if v.State == "Failed" { + return fmt.Errorf("file storage state failed") + } + } + } + return retry.RetryableError(fmt.Errorf("iks file storage state not ready, retry again")) + }); err != nil { + return nil, nil, fmt.Errorf("iks file storage state not ready after maximum retries") + } + + return storage, client.Cloudaccount, nil +} + +func (client *IDCServicesClient) CreateIKSLoadBalancer(ctx context.Context, in *IKSLoadBalancerRequest, clusterUUID string) (*IKSLoadBalancer, *string, error) { + params := struct { + Host string + Cloudaccount string + ClusterUUID string + }{ + Host: *client.Host, + 
Cloudaccount: *client.Cloudaccount, + ClusterUUID: clusterUUID, + } + + // Parse the template string with the provided data + parsedURL, err := common.ParseString(createIKSLBURL, params) + if err != nil { + return nil, nil, fmt.Errorf("error parsing the url") + } + + inArgs, err := json.MarshalIndent(in, "", " ") + if err != nil { + return nil, nil, fmt.Errorf("error parsing input arguments") + } + + tflog.Debug(ctx, "iks load balancer create api request", map[string]any{"url": parsedURL, "inArgs": string(inArgs)}) + retcode, retval, err := common.MakePOSTAPICall(ctx, parsedURL, *client.Apitoken, inArgs) + + if err != nil { + return nil, nil, fmt.Errorf("error reading iks load balancer create response") + } + tflog.Debug(ctx, "iks load balancer create api response", map[string]any{"retcode": retcode, "retval": string(retval)}) + + if retcode != http.StatusOK { + return nil, nil, common.MapHttpError(retcode) + } + + iksLB := &IKSLoadBalancer{} + if err := json.Unmarshal(retval, iksLB); err != nil { + return nil, nil, fmt.Errorf("error parsing load balancer response") + } + + backoffTimer := retry.NewConstant(5 * time.Second) + backoffTimer = retry.WithMaxDuration(3000*time.Second, backoffTimer) + + if err := retry.Do(ctx, backoffTimer, func(_ context.Context) error { + iksLB, err = client.GetIKSLoadBalancerByID(ctx, clusterUUID, iksLB.ID) + if err != nil { + return fmt.Errorf("error reading load balancer state") + } + if iksLB.VIPState == "Active" { + return nil + } else { + return retry.RetryableError(fmt.Errorf("iks load balancer state not ready, retry again")) + } + }); err != nil { + return nil, nil, fmt.Errorf("iks load balancer state not ready after maximum retries") + } + + return iksLB, client.Cloudaccount, nil +} + +func (client *IDCServicesClient) GetIKSLoadBalancerByID(ctx context.Context, clusterUUID string, vipId int64) (*IKSLoadBalancer, error) { + params := struct { + Host string + Cloudaccount string + ClusterUUID string + VipID int64 + }{ + Host: 
*client.Host, + Cloudaccount: *client.Cloudaccount, + ClusterUUID: clusterUUID, + VipID: vipId, + } + + // Parse the template string with the provided data + parsedURL, err := common.ParseString(getIKSLBURLByID, params) + if err != nil { + return nil, fmt.Errorf("error parsing the url") + } + + retcode, retval, err := common.MakeGetAPICall(ctx, parsedURL, *client.Apitoken, nil) + if err != nil { + return nil, fmt.Errorf("error reading load balancer resource by id") + } + tflog.Debug(ctx, "iks load balancer read response", map[string]any{"retcode": retcode, "retval": string(retval)}) + + if retcode != http.StatusOK { + return nil, common.MapHttpError(retcode) + } + + iksLB := IKSLoadBalancer{} + if err := json.Unmarshal(retval, &iksLB); err != nil { + return nil, fmt.Errorf("error parsing iks load balancer get response") + } + return &iksLB, nil +} + +func (client *IDCServicesClient) GetIKSLoadBalancerByClusterUUID(ctx context.Context, clusterUUID string) (*IKSLBsByCluster, error) { + params := struct { + Host string + Cloudaccount string + ClusterUUID string + }{ + Host: *client.Host, + Cloudaccount: *client.Cloudaccount, + ClusterUUID: clusterUUID, + } + + // Parse the template string with the provided data + parsedURL, err := common.ParseString(getIKSLBURLByCluster, params) + if err != nil { + return nil, fmt.Errorf("error parsing the url") + } + + retcode, retval, err := common.MakeGetAPICall(ctx, parsedURL, *client.Apitoken, nil) + if err != nil { + return nil, fmt.Errorf("error reading load balancer resource by cluster") + } + tflog.Debug(ctx, "iks load balancer read response", map[string]any{"retcode": retcode, "retval": string(retval)}) + + if retcode != http.StatusOK { + return nil, common.MapHttpError(retcode) + } + + resp := IKSLBsByCluster{} + if err := json.Unmarshal(retval, &resp); err != nil { + return nil, fmt.Errorf("error parsing iks load balancer get response") + } + return &resp, nil +} + +func (client *IDCServicesClient) DeleteIKSNodeGroup(ctx 
context.Context, clusterId, ngId string) error { + params := struct { + Host string + Cloudaccount string + ClusterUUID string + NodeGroupUUID string + }{ + Host: *client.Host, + Cloudaccount: *client.Cloudaccount, + ClusterUUID: clusterId, + NodeGroupUUID: ngId, + } + + // Parse the template string with the provided data + parsedURL, err := common.ParseString(getK8sNodeGroupURL, params) + if err != nil { + return fmt.Errorf("error parsing the url") + } + + tflog.Debug(ctx, "iks node group delete api", map[string]any{"parsedurl": parsedURL}) + retcode, _, err := common.MakeDeleteAPICall(ctx, parsedURL, *client.Apitoken, nil) + if err != nil { + return fmt.Errorf("error deleting iks node group by resource id") + } + + if retcode != http.StatusOK { + return common.MapHttpError(retcode) + } + + tflog.Debug(ctx, "iks node group delete api", map[string]any{"retcode": retcode}) + + return nil +} diff --git a/pkg/itacservices/object_storage.go b/pkg/itacservices/object_storage.go new file mode 100644 index 0000000..89a6f49 --- /dev/null +++ b/pkg/itacservices/object_storage.go @@ -0,0 +1,322 @@ +package itacservices + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "terraform-provider-intelcloud/pkg/itacservices/common" + "time" + + "github.com/hashicorp/terraform-plugin-log/tflog" + retry "github.com/sethvargo/go-retry" +) + +const ( + createObjectStorageBucketURL = "{{.Host}}/v1/cloudaccounts/{{.Cloudaccount}}/objects/buckets" + getObjectStorageBucketByResourceId = "{{.Host}}/v1/cloudaccounts/{{.Cloudaccount}}/objects/buckets/id/{{.ResourceId}}" + deleteObjectStorageBucketByResourceId = "{{.Host}}/v1/cloudaccounts/{{.Cloudaccount}}/objects/buckets/id/{{.ResourceId}}" + createObjectStorageUserURL = "{{.Host}}/v1/cloudaccounts/{{.Cloudaccount}}/objects/users" + deleteObjectStorageUserURL = "{{.Host}}/v1/cloudaccounts/{{.Cloudaccount}}/objects/users/id/{{.ResourceId}}" + getObjectStorageUserURL = 
"{{.Host}}/v1/cloudaccounts/{{.Cloudaccount}}/objects/users/id/{{.ResourceId}}" +) + +type ObjectBucketCreateRequest struct { + Metadata struct { + Name string `json:"name"` + } `json:"metadata"` + Spec struct { + Versioned bool `json:"versioned"` + InstanceType string `json:"instanceType"` + } `json:"spec"` +} + +type ObjectBucket struct { + Metadata struct { + Name string `json:"name"` + ResourceId string `json:"resourceId"` + Cloudaccount string `json:"cloudAccountId"` + } `json:"metadata"` + Spec struct { + Versioned bool `json:"versioned"` + InstanceType string `json:"instanceType"` + Request struct { + Size string `json:"size"` + } `json:"request"` + } `json:"spec"` + Status struct { + Phase string `json:"phase"` + Cluster struct { + AccessEndpoint string `json:"accessEndpoint"` + ClusterId string `json:"clusterId"` + } `json:"cluster"` + SecurityGroups struct { + NetworkFilterAllow []struct { + Gateway string `json:"gateway"` + PrefixLength int `json:"prefixLength"` + Subnet string `json:"subnet"` + } `json:"networkFilterAllow"` + } `json:"securityGroup"` + } `json:"status"` +} + +type ObjectUserCreateRequest struct { + Metadata struct { + Name string `json:"name"` + } `json:"metadata"` + Spec []BucketPolicy `json:"spec"` +} + +type BucketPolicy struct { + BucketId string `json:"bucketId"` + Actions []string `json:"actions"` + Permissions []string `json:"permission"` + Prefix string `json:"prefix"` +} + +type ObjectUser struct { + Metadata struct { + Name string `json:"name"` + UserId string `json:"userId"` + Cloudaccount string `json:"cloudAccountId"` + } `json:"metadata"` + Spec []BucketPolicy `json:"spec"` + Status struct { + Phase string `json:"phase"` + Principal struct { + Credentials struct { + AccessKey string `json:"accessKey"` + SecretKey string `json:"secretKey"` + } `json:"credentials"` + } `json:"principal"` + } +} + +func (client *IDCServicesClient) CreateObjectStorageBucket(ctx context.Context, in *ObjectBucketCreateRequest) (*ObjectBucket, 
error) { + params := struct { + Host string + Cloudaccount string + }{ + Host: *client.Host, + Cloudaccount: *client.Cloudaccount, + } + + // Parse the template string with the provided data + parsedURL, err := common.ParseString(createObjectStorageBucketURL, params) + if err != nil { + return nil, fmt.Errorf("error parsing the url") + } + + inArgs, err := json.MarshalIndent(in, "", " ") + if err != nil { + return nil, fmt.Errorf("error parsing input arguments") + } + + tflog.Debug(ctx, "bucket create api", map[string]any{"url": parsedURL, "inArgs": string(inArgs)}) + retcode, retval, err := common.MakePOSTAPICall(ctx, parsedURL, *client.Apitoken, inArgs) + tflog.Debug(ctx, "bucket create api", map[string]any{"retcode": retcode, "retval": string(retval)}) + if err != nil { + return nil, fmt.Errorf("error reading bucket create response") + } + if retcode != http.StatusOK { + return nil, common.MapHttpError(retcode) + } + + bucket := &ObjectBucket{} + if err := json.Unmarshal(retval, bucket); err != nil { + return nil, fmt.Errorf("error parsing bucket response") + } + + backoffTimer := retry.NewConstant(5 * time.Second) + backoffTimer = retry.WithMaxDuration(300*time.Second, backoffTimer) + + if err := retry.Do(ctx, backoffTimer, func(_ context.Context) error { + bucket, err = client.GetObjectBucketByResourceId(ctx, bucket.Metadata.ResourceId) + if err != nil { + return fmt.Errorf("error reading bucket state") + } + if bucket.Status.Phase == "BucketReady" { + return nil + } else if bucket.Status.Phase == "BucketFailed" { + return fmt.Errorf("bucket state failed") + } else { + return retry.RetryableError(fmt.Errorf("bucket state not ready, retry again")) + } + }); err != nil { + return nil, fmt.Errorf("bucket state not ready after maximum retries") + } + + return bucket, nil +} + +func (client *IDCServicesClient) GetObjectBucketByResourceId(ctx context.Context, resourceId string) (*ObjectBucket, error) { + params := struct { + Host string + Cloudaccount string + 
ResourceId string + }{ + Host: *client.Host, + Cloudaccount: *client.Cloudaccount, + ResourceId: resourceId, + } + + // Parse the template string with the provided data + parsedURL, err := common.ParseString(getObjectStorageBucketByResourceId, params) + if err != nil { + return nil, fmt.Errorf("error parsing the url") + } + + retcode, retval, err := common.MakeGetAPICall(ctx, parsedURL, *client.Apitoken, nil) + if err != nil { + return nil, fmt.Errorf("error reading bucket by resource id") + } + + if retcode != http.StatusOK { + return nil, common.MapHttpError(retcode) + } + + tflog.Debug(ctx, "object read api", map[string]any{"retcode": retcode}) + bucket := ObjectBucket{} + if err := json.Unmarshal(retval, &bucket); err != nil { + return nil, fmt.Errorf("error parsing bucket response") + } + return &bucket, nil +} + +func (client *IDCServicesClient) DeleteBucketByResourceId(ctx context.Context, resourceId string) error { + params := struct { + Host string + Cloudaccount string + ResourceId string + }{ + Host: *client.Host, + Cloudaccount: *client.Cloudaccount, + ResourceId: resourceId, + } + + // Parse the template string with the provided data + parsedURL, err := common.ParseString(deleteObjectStorageBucketByResourceId, params) + if err != nil { + return fmt.Errorf("error parsing the url") + } + + retcode, retval, err := common.MakeDeleteAPICall(ctx, parsedURL, *client.Apitoken, nil) + if err != nil { + return fmt.Errorf("error deleting object bucket by resource id") + } + + tflog.Debug(ctx, "object bucket delete api", map[string]any{"retcode": retcode, "retval": string(retval)}) + + if retcode != http.StatusOK { + return common.MapHttpError(retcode) + } + + tflog.Debug(ctx, "object bucket delete api", map[string]any{"retcode": retcode}) + + return nil +} + +func (client *IDCServicesClient) CreateObjectStorageUser(ctx context.Context, in *ObjectUserCreateRequest) (*ObjectUser, error) { + params := struct { + Host string + Cloudaccount string + }{ + Host: 
*client.Host, + Cloudaccount: *client.Cloudaccount, + } + + // Parse the template string with the provided data + parsedURL, err := common.ParseString(createObjectStorageUserURL, params) + if err != nil { + return nil, fmt.Errorf("error parsing the url") + } + + inArgs, err := json.MarshalIndent(in, "", " ") + if err != nil { + return nil, fmt.Errorf("error parsing input arguments") + } + + tflog.Debug(ctx, "bucket user create api", map[string]any{"url": parsedURL, "inArgs": string(inArgs)}) + retcode, retval, err := common.MakePOSTAPICall(ctx, parsedURL, *client.Apitoken, inArgs) + tflog.Debug(ctx, "bucket user create api", map[string]any{"retcode": retcode, "retval": string(retval)}) + if err != nil { + return nil, fmt.Errorf("error reading bucket user create response") + } + if retcode != http.StatusOK { + return nil, common.MapHttpError(retcode) + } + + objUser := &ObjectUser{} + if err := json.Unmarshal(retval, objUser); err != nil { + return nil, fmt.Errorf("error parsing bucket user response") + } + tflog.Debug(ctx, "bucket user create api", map[string]any{"retcode": retcode, "ret object": objUser}) + return objUser, nil +} + +func (client *IDCServicesClient) DeleteObjectUserByResourceId(ctx context.Context, userId string) error { + params := struct { + Host string + Cloudaccount string + ResourceId string + }{ + Host: *client.Host, + Cloudaccount: *client.Cloudaccount, + ResourceId: userId, + } + + // Parse the template string with the provided data + parsedURL, err := common.ParseString(deleteObjectStorageUserURL, params) + if err != nil { + return fmt.Errorf("error parsing the url") + } + + retcode, retval, err := common.MakeDeleteAPICall(ctx, parsedURL, *client.Apitoken, nil) + if err != nil { + return fmt.Errorf("error deleting object bucket user by id") + } + + tflog.Debug(ctx, "object bucket user delete api", map[string]any{"retcode": retcode, "retval": string(retval)}) + + if retcode != http.StatusOK { + return common.MapHttpError(retcode) + } + + 
tflog.Debug(ctx, "object bucket user delete api", map[string]any{"retcode": retcode}) + + return nil +} + +func (client *IDCServicesClient) GetObjectUserByUserId(ctx context.Context, userId string) (*ObjectUser, error) { + params := struct { + Host string + Cloudaccount string + ResourceId string + }{ + Host: *client.Host, + Cloudaccount: *client.Cloudaccount, + ResourceId: userId, + } + + // Parse the template string with the provided data + parsedURL, err := common.ParseString(getObjectStorageUserURL, params) + if err != nil { + return nil, fmt.Errorf("error parsing the url") + } + + retcode, retval, err := common.MakeGetAPICall(ctx, parsedURL, *client.Apitoken, nil) + if err != nil { + return nil, fmt.Errorf("error reading bucket user by id") + } + + if retcode != http.StatusOK { + return nil, common.MapHttpError(retcode) + } + + tflog.Debug(ctx, "object user read api", map[string]any{"retcode": retcode}) + user := ObjectUser{} + if err := json.Unmarshal(retval, &user); err != nil { + return nil, fmt.Errorf("error parsing bucket response") + } + return &user, nil +} diff --git a/pkg/itacservices/sshkeys.go b/pkg/itacservices/sshkeys.go new file mode 100644 index 0000000..0831393 --- /dev/null +++ b/pkg/itacservices/sshkeys.go @@ -0,0 +1,180 @@ +package itacservices + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + + "terraform-provider-intelcloud/pkg/itacservices/common" + + "github.com/hashicorp/terraform-plugin-log/tflog" +) + +var ( + getAllSSHKeysURLByAccount = "{{.Host}}/v1/cloudaccounts/{{.Cloudaccount}}/sshpublickeys" + createSSHKeyURL = "{{.Host}}/v1/cloudaccounts/{{.Cloudaccount}}/sshpublickeys" + getSSHKeyByResourceId = "{{.Host}}/v1/cloudaccounts/{{.Cloudaccount}}/sshpublickeys/id/{{.ResourceId}}" + deleteSSHKeyByResourceId = "{{.Host}}/v1/cloudaccounts/{{.Cloudaccount}}/sshpublickeys/id/{{.ResourceId}}" +) + +type SSHKeys struct { + SSHKey []SSHKey `json:"items"` +} + +type SSHKey struct { + Metadata struct { + ResourceId string 
`json:"resourceId"` + Cloudaccount string `json:"cloudAccountId"` + Name string `json:"name"` + Description string `json:"description"` + } `json:"metadata"` + Spec struct { + SSHPublicKey string `json:"sshPublicKey"` + OwnerEmail string `json:"ownerEmail"` + } `json:"spec"` +} + +type SSHKeyCreateRequest struct { + Metadata struct { + Name string `json:"name"` + } `json:"metadata"` + Spec struct { + SSHPublicKey string `json:"sshPublicKey"` + OwnerEmail string `json:"ownerEmail"` + } `json:"spec"` +} + +func (client *IDCServicesClient) GetSSHKeys(ctx context.Context) (*SSHKeys, error) { + params := struct { + Host string + Cloudaccount string + }{ + Host: *client.Host, + Cloudaccount: *client.Cloudaccount, + } + + // Parse the template string with the provided data + parsedURL, err := common.ParseString(getAllSSHKeysURLByAccount, params) + if err != nil { + return nil, fmt.Errorf("error parsing the url") + } + + retcode, retval, err := common.MakeGetAPICall(ctx, parsedURL, *client.Apitoken, nil) + tflog.Debug(ctx, "sshkeys read api", map[string]any{"retcode": retcode, "retval": string(retval)}) + if err != nil { + return nil, fmt.Errorf("error reading sshkeys") + } + if retcode != http.StatusOK { + return nil, common.MapHttpError(retcode) + } + + sshkeys := SSHKeys{} + if err := json.Unmarshal(retval, &sshkeys); err != nil { + return nil, fmt.Errorf("error parsing sshkey response") + } + return &sshkeys, nil +} + +func (client *IDCServicesClient) CreateSSHkey(ctx context.Context, in *SSHKeyCreateRequest) (*SSHKey, error) { + params := struct { + Host string + Cloudaccount string + }{ + Host: *client.Host, + Cloudaccount: *client.Cloudaccount, + } + + // Parse the template string with the provided data + parsedURL, err := common.ParseString(createSSHKeyURL, params) + if err != nil { + return nil, fmt.Errorf("error parsing the url") + } + + inArgs, err := json.MarshalIndent(in, "", " ") + if err != nil { + return nil, fmt.Errorf("error parsing input arguments") + } 
+ + tflog.Debug(ctx, "sshkey create api request", map[string]any{"url": parsedURL, "inArgs": string(inArgs)}) + retcode, retval, err := common.MakePOSTAPICall(ctx, parsedURL, *client.Apitoken, inArgs) + tflog.Debug(ctx, "sshkey create api response", map[string]any{"retcode": retcode, "retval": string(retval)}) + if err != nil { + return nil, fmt.Errorf("error reading sshkey create response") + } + + if retcode != http.StatusOK { + return nil, common.MapHttpError(retcode) + } + + sshkey := SSHKey{} + if err := json.Unmarshal(retval, &sshkey); err != nil { + return nil, fmt.Errorf("error parsing sshkey response") + } + return &sshkey, nil +} + +func (client *IDCServicesClient) GetSSHKeyByResourceId(ctx context.Context, resourceId string) (*SSHKey, error) { + params := struct { + Host string + Cloudaccount string + ResourceId string + }{ + Host: *client.Host, + Cloudaccount: *client.Cloudaccount, + ResourceId: resourceId, + } + + // Parse the template string with the provided data + parsedURL, err := common.ParseString(getSSHKeyByResourceId, params) + if err != nil { + return nil, fmt.Errorf("error parsing the url") + } + + retcode, retval, err := common.MakeGetAPICall(ctx, parsedURL, *client.Apitoken, nil) + if err != nil { + return nil, fmt.Errorf("error reading sshkey by resource id") + } + + if retcode != http.StatusOK { + return nil, common.MapHttpError(retcode) + } + + tflog.Debug(ctx, "sshkey read api", map[string]any{"retcode": retcode}) + sshkey := SSHKey{} + if err := json.Unmarshal(retval, &sshkey); err != nil { + return nil, fmt.Errorf("error parsing sshkey response") + } + return &sshkey, nil +} + +func (client *IDCServicesClient) DeleteSSHKeyByResourceId(ctx context.Context, resourceId string) error { + params := struct { + Host string + Cloudaccount string + ResourceId string + }{ + Host: *client.Host, + Cloudaccount: *client.Cloudaccount, + ResourceId: resourceId, + } + + // Parse the template string with the provided data + parsedURL, err := 
common.ParseString(deleteSSHKeyByResourceId, params) + if err != nil { + return fmt.Errorf("error parsing the url") + } + + retcode, _, err := common.MakeDeleteAPICall(ctx, parsedURL, *client.Apitoken, nil) + if err != nil { + return fmt.Errorf("error deleting sshkey by resource id") + } + + if retcode != http.StatusOK { + return common.MapHttpError(retcode) + } + + tflog.Debug(ctx, "sshkey delete api", map[string]any{"retcode": retcode}) + + return nil +} diff --git a/tools/tools.go b/tools/tools.go new file mode 100644 index 0000000..867d3a2 --- /dev/null +++ b/tools/tools.go @@ -0,0 +1,11 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:build tools + +package tools + +import ( + // Documentation generation + _ "github.com/hashicorp/terraform-plugin-docs/cmd/tfplugindocs" +)