mirror of https://github.com/rancher/quickstart.git synced 2026-02-05 12:45:15 +01:00

Add NeuVector AWS quickstart (#208)

This restructures the repository slightly to cleanly distinguish between Rancher and NeuVector quickstarts
Bastian Hofmann
2022-06-10 15:46:04 +02:00
committed by GitHub
parent 4de110b73c
commit 05285e8230
95 changed files with 780 additions and 52 deletions

View File

@@ -13,7 +13,7 @@ jobs:
strategy:
matrix:
-provider: ['aws', 'azure', 'do', 'gcp', 'hcloud', 'linode', 'scw']
+provider: ['rancher/aws', 'rancher/azure', 'rancher/do', 'rancher/gcp', 'rancher/hcloud', 'rancher/linode', 'rancher/scw', 'neuvector/aws']
# Use the Bash shell regardless whether the GitHub Actions runner is ubuntu-latest, macos-latest, or windows-latest
defaults:
@@ -51,7 +51,7 @@ jobs:
run: go install github.com/terraform-docs/terraform-docs@v0.16.0
- name: Validate if terraform-docs generated README.md is up to date
-run: terraform-docs -c ../.terraform-docs-readme.yml . --output-check
+run: terraform-docs -c ../../.terraform-docs-readme.yml . --output-check
- name: Validate if terraform-docs generated terraform.tfvars.example is up to date
-run: terraform-docs -c ../.terraform-docs-tfvars.yml . --output-check
+run: terraform-docs -c ../../.terraform-docs-tfvars.yml . --output-check

View File

@@ -1,5 +1,5 @@
-PROVIDERS = rancher-common aws azure do gcp hcloud linode scw
-CLOUD_PROVIDERS = aws azure do gcp hcloud linode scw
+PROVIDERS = rancher/rancher-common rancher/aws rancher/azure rancher/do rancher/gcp rancher/hcloud rancher/linode rancher/scw neuvector/aws
+CLOUD_PROVIDERS = rancher/aws rancher/azure rancher/do rancher/gcp rancher/hcloud rancher/linode rancher/scw neuvector/aws
upgrade-targets = $(addprefix upgrade-, $(PROVIDERS))
docs-targets = $(addprefix docs-, $(PROVIDERS))
@@ -14,11 +14,11 @@ $(upgrade-targets): upgrade-%: %
docs: $(docs-targets)
$(docs-targets): docs-%: %
-cd $< && terraform-docs -c ../.terraform-docs-readme.yml .
+cd $< && terraform-docs -c ../../.terraform-docs-readme.yml .
tfvars: $(tfvars-targets)
$(tfvars-targets): tfvars-%: %
-cd $< && terraform-docs -c ../.terraform-docs-tfvars.yml .
+cd $< && terraform-docs -c ../../.terraform-docs-tfvars.yml .
fmt: $(fmt-targets)
$(fmt-targets): fmt-%: %
@@ -38,4 +38,4 @@ test:
.PHONY: clean
clean:
-rm */terraform.tfstate.backup
+rm */*/terraform.tfstate.backup

View File

@@ -1,6 +1,6 @@
-# Quickstart examples for Rancher
+# Quickstart examples for the SUSE Rancher product portfolio
-Quickly stand up an HA-style Rancher management server in your infrastructure provider of choice.
+Quickly stand up an HA-style installation of SUSE Rancher products on your infrastructure provider of choice.
Intended for experimentation/evaluation ONLY.
@@ -8,7 +8,26 @@ Intended for experimentation/evaluation ONLY.
As a result, this repository minimizes costs by standing up the minimum required resources for a given provider.
Use Vagrant to run Rancher locally and avoid cloud costs.
-## Local quickstart
+## Rancher Management Server quickstart
+Rancher Management Server Quickstarts are provided for:
+### Cloud quickstart
+- [**Amazon Web Services** (`aws`)](./rancher/aws)
+- [**Microsoft Azure Cloud** (`azure`)](./rancher/azure)
+- [**DigitalOcean** (`do`)](./rancher/do)
+- [**Google Cloud Platform** (`gcp`)](./rancher/gcp)
+- [**Hetzner Cloud** (`hcloud`)](./rancher/hcloud)
+- [**Linode** (`linode`)](./rancher/linode)
+- [**Scaleway** (`scw`)](./rancher/scw)
+**You will be responsible for any and all infrastructure costs incurred by these resources.**
+Each quickstart will install Rancher on a single-node K3s cluster, then will provision another single-node workload cluster using a Custom cluster in Rancher.
+This setup provides easy access to the core Rancher functionality while establishing a foundation that can be easily expanded to a full HA Rancher server.
+### Local quickstart
A local quickstart is provided in the form of Vagrant configuration.
@@ -16,49 +35,41 @@ A local quickstart is provided in the form of Vagrant configuration.
Use this configuration only to evaluate the features of Rancher.
See cloud provider quickstarts for an HA foundation according to Rancher installation best practices.
-### Requirements - Vagrant (local)
+## NeuVector quickstart
+NeuVector Quickstarts are provided for:
+- [**Amazon Web Services for NeuVector** (`aws`)](./neuvector/aws)
+**You will be responsible for any and all infrastructure costs incurred by these resources.**
+Each quickstart will install NeuVector on a single-node RKE2 cluster. Optionally, a Rancher Management Server can be deployed as well.
+This setup provides easy access to the NeuVector Rancher functionality while establishing a foundation that can be easily expanded to a full HA NeuVector installation.
+## Requirements - Vagrant (local)
- [Vagrant](https://www.vagrantup.com)
- [VirtualBox](https://www.virtualbox.org)
- 6GB unused RAM
-### Using Vagrant quickstart
+### Using Vagrant quickstarts
See [/vagrant](./vagrant) for details on usage and settings.
-## Cloud quickstart
-Quickstarts are provided for:
-- [**Amazon Web Services** (`aws`)](./aws)
-- [**Microsoft Azure Cloud** (`azure`)](./azure)
-- [**Microsoft Azure Cloud with Windows nodes** (`azure-windows`)](./azure-windows)
-- [**DigitalOcean** (`do`)](./do)
-- [**Google Cloud Platform** (`gcp`)](./gcp)
-- [**Hetzner Cloud** (`hcloud`)](./hcloud)
-- [**Linode** (`linode`)](./linode)
-- [**Scaleway** (`scw`)](./scw)
-**You will be responsible for any and all infrastructure costs incurred by these resources.**
-Each quickstart will install Rancher on a single-node RKE cluster, then will provision another single-node workload cluster using a Custom cluster in Rancher.
-This setup provides easy access to the core Rancher functionality while establishing a foundation that can be easily expanded to a full HA Rancher server.
-### Requirements - Cloud
+## Requirements - Cloud
- Terraform >=1.0.0
- Credentials for the cloud provider used for the quickstart
-### Deploy
+### Using cloud quickstarts
To begin with any quickstart, perform the following steps:
1. Clone or download this repository to a local folder
-1. Choose a cloud provider and navigate into the provider's folder
-1. Copy or rename `terraform.tfvars.example` to `terraform.tfvars` and fill in all required variables
-1. Run `terraform init`
-1. Run `terraform apply`
+2. Choose a cloud provider and navigate into the provider's folder
+3. Copy or rename `terraform.tfvars.example` to `terraform.tfvars` and fill in all required variables
+4. Run `terraform init`
+5. Run `terraform apply`
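In practice, the deploy steps above amount to a short shell session. A minimal sketch (the directory shown is the NeuVector AWS quickstart added by this commit; any of the rancher/* folders works the same way):

```bash
git clone https://github.com/rancher/quickstart.git
cd quickstart/neuvector/aws        # or rancher/aws, rancher/azure, ...
cp terraform.tfvars.example terraform.tfvars
# fill in the required variables (credentials, admin passwords) before continuing
terraform init
terraform apply
```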
When provisioning has finished, terraform will output the URL to connect to the Rancher server.
Two sets of Kubernetes configurations will also be generated:

neuvector/aws/.terraform.lock.hcl generated Normal file
View File

@@ -0,0 +1,141 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/aws" {
version = "4.17.0"
constraints = "4.17.0"
hashes = [
"h1:FtkENM8QDK6CLhBr4k97kx26G2CqHZk9q5sNhHcnYRc=",
"zh:2cc932fb0af13850de3c60a5318b695c82973489c140ca4f13218f69136c36e5",
"zh:4018884d66acfa8273f7100ef0334004ed8a3790ffc7621eaef65d1d9c3fab43",
"zh:6a7769e5c81e543f5deaaa8596e45f92244a61f5026c8c66d5bf55f2a7fd4801",
"zh:7956c1e17ec7647af3612cc98cbcd21d50b2d9f5e41c676b62ee214f5610c29f",
"zh:833d9d608dbffda7da565004ef592a8a364e96b5c13cacf873f5d32714e197ff",
"zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425",
"zh:a55b8b72e47999d9c7aecaa009797ed7eb3f669a719d3f6127ee5e0f1b91ecc2",
"zh:a6f2377d71dfba9669f060e687498e589b490366026821bd83451ac9ef0cd9e8",
"zh:b006aa281097b3db27a62ea3c8cfaf4c4d979d57f4a6c180bd2da3d0ab4bd61a",
"zh:d6a6d29256fee6c3b35384719c84c19b13dcccc53bffce5f529023607d130d0b",
"zh:edc155e147883872e1227aa6a4ef3205fa9de421475d96c20a34a5eaff3df01f",
"zh:f25773dcc00dead0412e222cf3891ac6228dcb4d69da9bacfca305a0a2a1db56",
]
}
provider "registry.terraform.io/hashicorp/helm" {
version = "2.5.1"
constraints = "2.5.1"
hashes = [
"h1:a9KwjqINdNy6IsEbkHUB1vwvYfy5OJ2VxFL9/NDFLoY=",
"zh:140b9748f0ad193a20d69e59d672f3c4eda8a56cede56a92f931bd3af020e2e9",
"zh:17ae319466ed6538ad49e011998bb86565fe0e97bc8b9ad7c8dda46a20f90669",
"zh:3a8bd723c21ba70e19f0395ed7096fc8e08bfc23366f1c3f06a9107eb37c572c",
"zh:3aae3b82adbe6dca52f1a1c8cf51575446e6b0f01f1b1f3b30de578c9af4a933",
"zh:3f65221f40148df57d2888e4f31ef3bf430b8c5af41de0db39a2b964e1826d7c",
"zh:650c74c4f46f5eb01df11d8392bdb7ebee3bba59ac0721000a6ad731ff0e61e2",
"zh:930fb8ab4cd6634472dfd6aa3123f109ef5b32cbe6ef7b4695fae6751353e83f",
"zh:ae57cd4b0be4b9ca252bc5d347bc925e35b0ed74d3dcdebf06c11362c1ac3436",
"zh:d15b1732a8602b6726eac22628b2f72f72d98b75b9c6aabceec9fd696fda696a",
"zh:d730ede1656bd193e2aea5302acec47c4905fe30b96f550196be4a0ed5f41936",
"zh:f010d4f9d8cd15936be4df12bf256cb2175ca1dedb728bd3a866c03d2ee7591f",
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
]
}
provider "registry.terraform.io/hashicorp/kubernetes" {
version = "2.11.0"
constraints = "2.11.0"
hashes = [
"h1:T65SZhN/tQgsAsHe/G5PCgpjofi+aTKPZ+nZg6WOJpc=",
"zh:143a19dd0ea3b07fc5e3d9231f3c2d01f92894385c98a67327de74c76c715843",
"zh:1fc757d209e09c3cf7848e4274daa32408c07743698fbed10ee52a4a479b62b6",
"zh:22dfebd0685749c51a8f765d51a1090a259778960ac1cd4f32021a325b2b9b72",
"zh:3039b3b76e870cd8fc404cf75a29c66b171c6ba9b6182e131b6ae2ca648ec7c0",
"zh:3af0a15562fcab4b5684b18802e0239371b2b8ff9197ed069ff4827f795a002b",
"zh:50aaf20336d1296a73315adb66f7687f75bd5c6b1f93a894b95c75cc142810ec",
"zh:682064fabff895ec351860b4fe0321290bbbb17c2a410b62c9bea0039400650e",
"zh:70ac914d5830b3371a2679d8f77cc20c419a6e12925145afae6c977c8eb90934",
"zh:710aa02cccf7b0f3fb50880d6d2a7a8b8c9435248666616844ba71f74648cddc",
"zh:88e418118cd5afbdec4984944c7ab36950bf48e8d3e09e090232e55eecfb470b",
"zh:9cef159377bf23fa331f8724fdc6ce27ad39a217a4bae6df3b1ca408fc643da6",
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
]
}
provider "registry.terraform.io/hashicorp/local" {
version = "2.2.3"
constraints = "2.2.3"
hashes = [
"h1:KmHz81iYgw9Xn2L3Carc2uAzvFZ1XsE7Js3qlVeC77k=",
"zh:04f0978bb3e052707b8e82e46780c371ac1c66b689b4a23bbc2f58865ab7d5c0",
"zh:6484f1b3e9e3771eb7cc8e8bab8b35f939a55d550b3f4fb2ab141a24269ee6aa",
"zh:78a56d59a013cb0f7eb1c92815d6eb5cf07f8b5f0ae20b96d049e73db915b238",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:8aa9950f4c4db37239bcb62e19910c49e47043f6c8587e5b0396619923657797",
"zh:996beea85f9084a725ff0e6473a4594deb5266727c5f56e9c1c7c62ded6addbb",
"zh:9a7ef7a21f48fabfd145b2e2a4240ca57517ad155017e86a30860d7c0c109de3",
"zh:a63e70ac052aa25120113bcddd50c1f3cfe61f681a93a50cea5595a4b2cc3e1c",
"zh:a6e8d46f94108e049ad85dbed60354236dc0b9b5ec8eabe01c4580280a43d3b8",
"zh:bb112ce7efbfcfa0e65ed97fa245ef348e0fd5bfa5a7e4ab2091a9bd469f0a9e",
"zh:d7bec0da5c094c6955efed100f3fe22fca8866859f87c025be1760feb174d6d9",
"zh:fb9f271b72094d07cef8154cd3d50e9aa818a0ea39130bc193132ad7b23076fd",
]
}
provider "registry.terraform.io/hashicorp/tls" {
version = "3.4.0"
constraints = "3.4.0"
hashes = [
"h1:fSRc/OyRitbAST9vE+mEcmgJiDp+Jx8pGPbUUeYEQRc=",
"zh:2442a0df0cfb550b8eba9b2af39ac06f54b62447eb369ecc6b1c29f739b33bbb",
"zh:3ebb82cacb677a099de55f844f0d02886bc804b1a2b94441bc40fabcb64d2a38",
"zh:436125c2a7e66bc62a4a7c68bdca694f071d7aa894e8637dc83f4a68fe322546",
"zh:5f03db9f1d77e8274ff4750ae32d5c16c42b862b06bcb0683e4d733c8db922e4",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:8190142ae8a539ab34193b7e75da0fa04035d1dcd8af8be94df1eafeeffb44b6",
"zh:8cdc7cd9221e27c189e5beaf78462fce4c2edb081f415a1eafc6da2949de31e2",
"zh:a5de0f7f5d63c59ebf61d3c1d94040f410665ff0aa04f66674efe24b39a11f94",
"zh:a9fce48db3c140cc3e06f8a3c7ef4d36735e457e7660442d6d5dcd2b0781adc3",
"zh:beb92de584c790c7c7f047e45ccd22b6ee3263c7b5a91ae4d6882ae6e7700570",
"zh:f373f8cc52846fb513f44f468d885f722ca4dc22af9ff1942368cafd16b796b3",
"zh:f69627fd6e5a920b17ff423cdbad2715078ca6d13146dc67668795582ab43748",
]
}
provider "registry.terraform.io/loafoe/ssh" {
version = "1.2.0"
constraints = "1.2.0"
hashes = [
"h1:L9AYfDrWk2nVMXTqk30z+iidgz0X3VIxCepQZX2ZSMk=",
"zh:0af31d1d2d4b6a71669a8c18654deb14fa32522ffdef4dfde62ac151f89f389b",
"zh:0d3b7ad68e886936d47626143c42ccb922a34aaab8b1e7d4f42ce077be307765",
"zh:228270701fcc507fb8b8a698451aa26b02c45a976b841e07c973556cf2406e53",
"zh:32de82d3fcd009a6a354c0468769f1442f01836a34f6b4578b4da054f5ecf102",
"zh:3f7fe1dad01cb05c29f1f6e6033891e66a21f2bc2a4fe1da6096b33f8889521a",
"zh:796115f0b374dece64ae47b7fa680d0ffce37a3d837de5c7145a41bf3b7c793f",
"zh:88c63a174435e2b9b6561187ae87cdeebaa9ffb86f61dd546fbd0e1c4beff6d6",
"zh:a73b8573a81be7ca021b4d65883f4058337d1a4d10a56792dc80cacbfea12031",
"zh:c2a45a6cfd301467139a8c1f533fa92163128d35998e7191b68a8fd103160e62",
"zh:ebef9e003bdb977fd51c4260cb350b62e1a93d746971d8a9ad46b43500066a3f",
"zh:ef8e621d7732b120e1af1dfa08f9b983d0d93de28f0e269afa7fd50a66436477",
]
}
provider "registry.terraform.io/rancher/rancher2" {
version = "1.24.0"
constraints = "1.24.0"
hashes = [
"h1:XWrsT6bqmswE61stocRBu0CsuXPRHqs7xS30ktnM2Kg=",
"zh:0278e7eca669b10082c7c0fd2037449e325d5f63db5881fd1a9b0c1cdf3be0bc",
"zh:19ef195b8af98deb2789533b05edf3870d49cdf82d6e07d197e9579bd77a0ffd",
"zh:3b875842e04b8205f018b5fbf481c0cfb69e2d1aae8b4b70323b60b1d03d2a7b",
"zh:6b7d4d6bb9c15fe6af369216afcf78020fdfbfbdebac7b158c8a1cde1278f38b",
"zh:a72d6438b7adfcc2327357bb4137ad65117e87db5ec463d2d9ed4a414d111a5b",
"zh:ad057167ddb5fc2126700328d41ba899db963d653e214c94d4f48b6f1e7b71b4",
"zh:b11dcb4adee3bd5a6a9118fe75709f3fb16b646eb022e21a8ea54d58ba3ebbcd",
"zh:b3516d8531e45cd9084fd12c00b94a0163b8d2cca7571ff68a8fe999a85232a5",
"zh:bc192ac3c7e98d5ad9532dd81ed29eb63c82fe8472dfc86b2807ff6182c95264",
"zh:cea331226092db9d6b7d45739293f2484c8811213636b07ca7c94a5d3592a925",
"zh:f26a9ebadbee87588166e519e1d74d14483a8188acc7b5c61809efb3c72f82c8",
"zh:f6705e74b669538280e9f1c9bce57a296497d7f17a7231dc9aaf95b89b3668a2",
]
}

neuvector/aws/README.md Normal file
View File

@@ -0,0 +1,85 @@
# AWS NeuVector Quickstart
This will create a single node RKE2 cluster running on an EC2 instance with SLES 15 and install NeuVector into the cluster.
The instance will have wide-open security groups and will be accessible over SSH using the SSH keys
`id_rsa` and `id_rsa.pub`.
Optionally, you can also deploy the Rancher Management Server into the same cluster to test the Rancher and NeuVector integration.
<!-- BEGIN_TF_DOCS -->
## Requirements
| Name | Version |
|------|---------|
| <a name="requirement_terraform"></a> [terraform](#requirement\_terraform) | >= 1.0.0 |
| <a name="requirement_aws"></a> [aws](#requirement\_aws) | 4.17.0 |
| <a name="requirement_helm"></a> [helm](#requirement\_helm) | 2.5.1 |
| <a name="requirement_kubernetes"></a> [kubernetes](#requirement\_kubernetes) | 2.11.0 |
| <a name="requirement_local"></a> [local](#requirement\_local) | 2.2.3 |
| <a name="requirement_rancher2"></a> [rancher2](#requirement\_rancher2) | 1.24.0 |
| <a name="requirement_ssh"></a> [ssh](#requirement\_ssh) | 1.2.0 |
| <a name="requirement_tls"></a> [tls](#requirement\_tls) | 3.4.0 |
## Providers
| Name | Version |
|------|---------|
| <a name="provider_aws"></a> [aws](#provider\_aws) | 4.17.0 |
| <a name="provider_helm"></a> [helm](#provider\_helm) | 2.5.1 |
| <a name="provider_local"></a> [local](#provider\_local) | 2.2.3 |
| <a name="provider_rancher2.bootstrap"></a> [rancher2.bootstrap](#provider\_rancher2.bootstrap) | 1.24.0 |
| <a name="provider_ssh"></a> [ssh](#provider\_ssh) | 1.2.0 |
| <a name="provider_tls"></a> [tls](#provider\_tls) | 3.4.0 |
## Modules
No modules.
## Resources
| Name | Type |
|------|------|
| [aws_instance.neuvector_server](https://registry.terraform.io/providers/hashicorp/aws/4.17.0/docs/resources/instance) | resource |
| [aws_key_pair.quickstart_key_pair](https://registry.terraform.io/providers/hashicorp/aws/4.17.0/docs/resources/key_pair) | resource |
| [aws_security_group.neuvector_sg_allowall](https://registry.terraform.io/providers/hashicorp/aws/4.17.0/docs/resources/security_group) | resource |
| [helm_release.cert_manager](https://registry.terraform.io/providers/hashicorp/helm/2.5.1/docs/resources/release) | resource |
| [helm_release.cluster_issuer](https://registry.terraform.io/providers/hashicorp/helm/2.5.1/docs/resources/release) | resource |
| [helm_release.neuvector](https://registry.terraform.io/providers/hashicorp/helm/2.5.1/docs/resources/release) | resource |
| [helm_release.rancher_server](https://registry.terraform.io/providers/hashicorp/helm/2.5.1/docs/resources/release) | resource |
| [local_file.kube_config_server_yaml](https://registry.terraform.io/providers/hashicorp/local/2.2.3/docs/resources/file) | resource |
| [local_file.ssh_public_key_openssh](https://registry.terraform.io/providers/hashicorp/local/2.2.3/docs/resources/file) | resource |
| [local_sensitive_file.ssh_private_key_pem](https://registry.terraform.io/providers/hashicorp/local/2.2.3/docs/resources/sensitive_file) | resource |
| [rancher2_bootstrap.admin](https://registry.terraform.io/providers/rancher/rancher2/1.24.0/docs/resources/bootstrap) | resource |
| [ssh_resource.install_rke2](https://registry.terraform.io/providers/loafoe/ssh/1.2.0/docs/resources/resource) | resource |
| [ssh_resource.retrieve_config](https://registry.terraform.io/providers/loafoe/ssh/1.2.0/docs/resources/resource) | resource |
| [ssh_resource.rke2_config](https://registry.terraform.io/providers/loafoe/ssh/1.2.0/docs/resources/resource) | resource |
| [ssh_resource.rke2_config_dir](https://registry.terraform.io/providers/loafoe/ssh/1.2.0/docs/resources/resource) | resource |
| [tls_private_key.global_key](https://registry.terraform.io/providers/hashicorp/tls/3.4.0/docs/resources/private_key) | resource |
| [aws_ami.sles](https://registry.terraform.io/providers/hashicorp/aws/4.17.0/docs/data-sources/ami) | data source |
## Inputs
| Name | Description | Type | Default | Required |
|------|-------------|------|---------|:--------:|
| <a name="input_aws_access_key"></a> [aws\_access\_key](#input\_aws\_access\_key) | AWS access key used to create infrastructure | `string` | n/a | yes |
| <a name="input_aws_secret_key"></a> [aws\_secret\_key](#input\_aws\_secret\_key) | AWS secret key used to create AWS infrastructure | `string` | n/a | yes |
| <a name="input_neuvector_admin_password"></a> [neuvector\_admin\_password](#input\_neuvector\_admin\_password) | Admin password for NeuVector | `string` | n/a | yes |
| <a name="input_aws_region"></a> [aws\_region](#input\_aws\_region) | AWS region used for all resources | `string` | `"us-east-1"` | no |
| <a name="input_aws_session_token"></a> [aws\_session\_token](#input\_aws\_session\_token) | AWS session token used to create AWS infrastructure | `string` | `""` | no |
| <a name="input_cert_manager_version"></a> [cert\_manager\_version](#input\_cert\_manager\_version) | Version of cert-manager to install alongside NeuVector (format: 0.0.0) | `string` | `"1.7.1"` | no |
| <a name="input_install_rancher"></a> [install\_rancher](#input\_install\_rancher) | Also install Rancher and setup SSO for NeuVector | `bool` | `false` | no |
| <a name="input_instance_type"></a> [instance\_type](#input\_instance\_type) | Instance type used for all EC2 instances | `string` | `"t3a.xlarge"` | no |
| <a name="input_kubernetes_version"></a> [kubernetes\_version](#input\_kubernetes\_version) | Kubernetes version to use | `string` | `"v1.22.10-rc2+rke2r1"` | no |
| <a name="input_neuvector_chart_version"></a> [neuvector\_chart\_version](#input\_neuvector\_chart\_version) | NeuVector helm chart version | `string` | `"2.2.0"` | no |
| <a name="input_prefix"></a> [prefix](#input\_prefix) | Prefix added to names of all resources | `string` | `"neuvector-quickstart"` | no |
| <a name="input_rancher_server_admin_password"></a> [rancher\_server\_admin\_password](#input\_rancher\_server\_admin\_password) | Admin password to use for Rancher server bootstrap | `string` | `"adminadminadmin"` | no |
| <a name="input_rancher_version"></a> [rancher\_version](#input\_rancher\_version) | Rancher version | `string` | `"2.6.5"` | no |
## Outputs
| Name | Description |
|------|-------------|
| <a name="output_neuvector_url"></a> [neuvector\_url](#output\_neuvector\_url) | n/a |
| <a name="output_node_ip"></a> [node\_ip](#output\_node\_ip) | n/a |
| <a name="output_rancher_url"></a> [rancher\_url](#output\_rancher\_url) | n/a |
<!-- END_TF_DOCS -->
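As a usage note for the prose above: once `terraform apply` has completed, the generated `id_rsa` key and the `node_ip` output are enough to reach the instance over SSH. A minimal sketch, assuming the commands are run from this quickstart's directory:

```bash
# neuvector_url and node_ip are outputs of this quickstart
terraform output neuvector_url
ssh -i id_rsa ec2-user@"$(terraform output -raw node_ip)"
```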

View File

@@ -0,0 +1,32 @@
resource "helm_release" "cert_manager" {
depends_on = [local_file.kube_config_server_yaml]
name = "cert-manager"
chart = "https://charts.jetstack.io/charts/cert-manager-v${var.cert_manager_version}.tgz"
namespace = "cert-manager"
create_namespace = true
wait = true
set {
name = "installCRDs"
value = "true"
}
}
resource "helm_release" "cluster_issuer" {
depends_on = [helm_release.cert_manager]
name = "selfsigned-cluster-issuer"
chart = "https://github.com/adfinis-sygroup/helm-charts/releases/download/cert-manager-issuers-0.2.4/cert-manager-issuers-0.2.4.tgz"
namespace = "cert-manager"
create_namespace = true
wait = true
values = [
<<EOT
clusterIssuers:
- spec:
selfSigned: {}
EOT
]
}

neuvector/aws/data.tf Normal file
View File

@@ -0,0 +1,25 @@
# Use latest SLES 15 SP3
data "aws_ami" "sles" {
most_recent = true
owners = ["013907871322"] # SUSE
filter {
name = "name"
values = ["suse-sles-15-sp3*"]
}
filter {
name = "virtualization-type"
values = ["hvm"]
}
filter {
name = "architecture"
values = ["x86_64"]
}
filter {
name = "root-device-type"
values = ["ebs"]
}
}

neuvector/aws/infra.tf Normal file
View File

@@ -0,0 +1,80 @@
# AWS infrastructure resources
resource "tls_private_key" "global_key" {
algorithm = "RSA"
rsa_bits = 2048
}
resource "local_sensitive_file" "ssh_private_key_pem" {
filename = "${path.module}/id_rsa"
content = tls_private_key.global_key.private_key_pem
file_permission = "0600"
}
resource "local_file" "ssh_public_key_openssh" {
filename = "${path.module}/id_rsa.pub"
content = tls_private_key.global_key.public_key_openssh
}
# Temporary key pair used for SSH access
resource "aws_key_pair" "quickstart_key_pair" {
key_name_prefix = "${var.prefix}-neuvector-"
public_key = tls_private_key.global_key.public_key_openssh
}
# Security group to allow all traffic
resource "aws_security_group" "neuvector_sg_allowall" {
name = "${var.prefix}-neuvector-allowall"
description = "NeuVector quickstart - allow all traffic"
ingress {
from_port = "0"
to_port = "0"
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = "0"
to_port = "0"
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
tags = {
Creator = "neuvector-quickstart"
}
}
# AWS EC2 instance for creating a single-node RKE2 cluster and installing NeuVector
resource "aws_instance" "neuvector_server" {
ami = data.aws_ami.sles.id
instance_type = var.instance_type
key_name = aws_key_pair.quickstart_key_pair.key_name
vpc_security_group_ids = [aws_security_group.neuvector_sg_allowall.id]
root_block_device {
volume_size = 16
}
provisioner "remote-exec" {
inline = [
"echo 'Waiting for cloud-init to complete...'",
"cloud-init status --wait > /dev/null",
"echo 'Completed cloud-init!'",
]
connection {
type = "ssh"
host = self.public_ip
user = local.node_username
private_key = tls_private_key.global_key.private_key_pem
}
}
tags = {
Name = "${var.prefix}-neuvector-server"
Creator = "neuvector-quickstart"
}
}

View File

@@ -0,0 +1,60 @@
resource "helm_release" "neuvector" {
depends_on = [
helm_release.cert_manager,
helm_release.cluster_issuer
]
name = "neuvector"
chart = "https://neuvector.github.io/neuvector-helm/core-${var.neuvector_chart_version}.tgz"
namespace = "cattle-neuvector-system"
create_namespace = true
wait = true
values = [
<<EOT
%{if var.install_rancher}
global:
cattle:
url: https://${local.rancher_hostname}/
%{endif}
controller:
replicas: 1
apisvc:
type: ClusterIP
%{if var.install_rancher}
ranchersso:
enabled: true
%{endif}
secret:
enabled: true
data:
sysinitcfg.yaml:
Cluster_Name: demo
userinitcfg.yaml:
users:
- Fullname: admin
Username: admin
Role: admin
Password: ${var.neuvector_admin_password}
cve:
scanner:
replicas: 1
manager:
ingress:
enabled: true
host: ${local.neuvector_hostname}
annotations:
cert-manager.io/cluster-issuer: cert-manager-issuers
nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
tls: true
secretName: neuvector-tls-secret
k3s:
enabled: true
EOT
]
}
locals {
neuvector_hostname = join(".", ["neuvector", aws_instance.neuvector_server.public_ip, "sslip.io"])
}
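A hedged way to inspect what this release actually received after `terraform apply`, assuming `helm` is installed locally and pointed at the kubeconfig written by this quickstart:

```bash
# shows the values Terraform rendered for the neuvector release
# (install_rancher toggles the global.cattle and ranchersso blocks)
helm get values neuvector -n cattle-neuvector-system --kubeconfig kube_config.yaml
```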

neuvector/aws/output.tf Normal file
View File

@@ -0,0 +1,11 @@
output "neuvector_url" {
value = local.neuvector_hostname
}
output "rancher_url" {
value = var.install_rancher ? local.rancher_hostname : null
}
output "node_ip" {
value = aws_instance.neuvector_server.public_ip
}

neuvector/aws/provider.tf Normal file
View File

@@ -0,0 +1,54 @@
terraform {
required_providers {
aws = {
source = "hashicorp/aws"
version = "4.17.0"
}
local = {
source = "hashicorp/local"
version = "2.2.3"
}
tls = {
source = "hashicorp/tls"
version = "3.4.0"
}
ssh = {
source = "loafoe/ssh"
version = "1.2.0"
}
kubernetes = {
source = "hashicorp/kubernetes"
version = "2.11.0"
}
helm = {
source = "hashicorp/helm"
version = "2.5.1"
}
rancher2 = {
source = "rancher/rancher2"
version = "1.24.0"
}
}
required_version = ">= 1.0.0"
}
provider "aws" {
access_key = var.aws_access_key
secret_key = var.aws_secret_key
token = var.aws_session_token
region = var.aws_region
}
provider "helm" {
kubernetes {
config_path = local_file.kube_config_server_yaml.filename
}
}
provider "rancher2" {
alias = "bootstrap"
api_url = "https://${local.rancher_hostname}"
insecure = true
bootstrap = true
}

neuvector/aws/rancher.tf Normal file
View File

@@ -0,0 +1,44 @@
resource "helm_release" "rancher_server" {
count = var.install_rancher ? 1 : 0
depends_on = [
helm_release.cert_manager,
]
name = "rancher"
chart = "https://releases.rancher.com/server-charts/latest/rancher-${var.rancher_version}.tgz"
namespace = "cattle-system"
create_namespace = true
wait = true
set {
name = "hostname"
value = local.rancher_hostname
}
set {
name = "replicas"
value = "1"
}
set {
name = "bootstrapPassword"
value = "admin" # TODO: change this once the terraform provider has been updated with the new pw bootstrap logic
}
}
resource "rancher2_bootstrap" "admin" {
count = var.install_rancher ? 1 : 0
depends_on = [
helm_release.rancher_server
]
provider = rancher2.bootstrap
password = var.rancher_server_admin_password
telemetry = true
}
locals {
rancher_hostname = join(".", ["rancher", aws_instance.neuvector_server.public_ip, "sslip.io"])
}

neuvector/aws/rke2.tf Normal file
View File

@@ -0,0 +1,59 @@
# RKE2 cluster for NeuVector
resource "ssh_resource" "rke2_config_dir" {
host = aws_instance.neuvector_server.public_ip
commands = [
"sudo mkdir -p /etc/rancher/rke2",
"sudo chmod 777 /etc/rancher/rke2"
]
user = local.node_username
private_key = tls_private_key.global_key.private_key_pem
}
resource "ssh_resource" "rke2_config" {
depends_on = [ssh_resource.rke2_config_dir]
host = aws_instance.neuvector_server.public_ip
user = local.node_username
private_key = tls_private_key.global_key.private_key_pem
file {
content = <<EOT
tls-san:
- ${aws_instance.neuvector_server.public_ip}
node-external-ip: ${aws_instance.neuvector_server.public_ip}
node-ip: ${aws_instance.neuvector_server.private_ip}
EOT
destination = "/etc/rancher/rke2/config.yaml"
permissions = "0644"
}
}
resource "ssh_resource" "install_rke2" {
depends_on = [ssh_resource.rke2_config]
host = aws_instance.neuvector_server.public_ip
commands = [
"sudo bash -c 'curl https://get.rke2.io | INSTALL_RKE2_VERSION=${var.kubernetes_version} sh -'",
"sudo systemctl enable rke2-server",
"sudo systemctl start rke2-server",
"sleep 120", # wait until the ingress controller, including the validating webhook is available, otherwise the installation of charts with ingresses may fail
"sudo /var/lib/rancher/rke2/bin/kubectl --kubeconfig /etc/rancher/rke2/rke2.yaml rollout status daemonset rke2-ingress-nginx-controller -n kube-system"
]
user = local.node_username
private_key = tls_private_key.global_key.private_key_pem
}
resource "ssh_resource" "retrieve_config" {
depends_on = [
ssh_resource.install_rke2
]
host = aws_instance.neuvector_server.public_ip
commands = [
"sudo sed \"s/127.0.0.1/${aws_instance.neuvector_server.public_ip}/g\" /etc/rancher/rke2/rke2.yaml"
]
user = local.node_username
private_key = tls_private_key.global_key.private_key_pem
}
# Save kubeconfig file for interacting with the RKE2 cluster on your local machine
resource "local_file" "kube_config_server_yaml" {
filename = format("%s/%s", path.root, "kube_config.yaml")
content = ssh_resource.retrieve_config.result
}
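The kubeconfig saved by the last resource can be used directly from the local machine; a minimal usage sketch, assuming `kubectl` is installed:

```bash
export KUBECONFIG="$PWD/kube_config.yaml"
kubectl get nodes
kubectl get pods -n cattle-neuvector-system   # NeuVector pods once the helm release is installed
```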

View File

@@ -0,0 +1,39 @@
# AWS access key used to create infrastructure
aws_access_key = ""
# AWS secret key used to create AWS infrastructure
aws_secret_key = ""
# Admin password for NeuVector
neuvector_admin_password = ""
# AWS region used for all resources
aws_region = "us-east-1"
# AWS session token used to create AWS infrastructure
aws_session_token = ""
# Version of cert-manager to install alongside NeuVector (format: 0.0.0)
cert_manager_version = "1.7.1"
# Also install Rancher and setup SSO for NeuVector
install_rancher = false
# Instance type used for all EC2 instances
instance_type = "t3a.xlarge"
# Kubernetes version to use
kubernetes_version = "v1.22.10-rc2+rke2r1"
# NeuVector helm chart version
neuvector_chart_version = "2.2.0"
# Prefix added to names of all resources
prefix = "neuvector-quickstart"
# Admin password to use for Rancher server bootstrap
rancher_server_admin_password = "adminadminadmin"
# Rancher version
rancher_version = "2.6.5"

View File

@@ -0,0 +1,83 @@
# Required
variable "aws_access_key" {
type = string
description = "AWS access key used to create infrastructure"
}
# Required
variable "aws_secret_key" {
type = string
description = "AWS secret key used to create AWS infrastructure"
}
# Required
variable "neuvector_admin_password" {
type = string
description = "Admin password for NeuVector"
}
variable "install_rancher" {
type = bool
default = false
description = "Also install Rancher and setup SSO for NeuVector"
}
variable "rancher_server_admin_password" {
type = string
description = "Admin password to use for Rancher server bootstrap"
default = "adminadminadmin"
}
variable "aws_session_token" {
type = string
description = "AWS session token used to create AWS infrastructure"
default = ""
}
variable "aws_region" {
type = string
description = "AWS region used for all resources"
default = "us-east-1"
}
variable "prefix" {
type = string
description = "Prefix added to names of all resources"
default = "neuvector-quickstart"
}
variable "instance_type" {
type = string
description = "Instance type used for all EC2 instances"
default = "t3a.xlarge"
}
variable "kubernetes_version" {
type = string
description = "Kubernetes version to use"
default = "v1.22.10-rc2+rke2r1"
}
variable "neuvector_chart_version" {
type = string
description = "NeuVector helm chart version"
default = "2.2.0"
}
variable "cert_manager_version" {
type = string
description = "Version of cert-manager to install alongside NeuVector (format: 0.0.0)"
default = "1.7.1"
}
variable "rancher_version" {
type = string
description = "Rancher version"
default = "2.6.5"
}
# Local variables used to reduce repetition
locals {
node_username = "ec2-user"
}

View File

@@ -14,32 +14,36 @@ import (
"k8s.io/apimachinery/pkg/util/rand"
)
-func TestE2E_Aws(t *testing.T) {
-runTerraformAndVerify(t, "../aws")
+func TestE2E_RancherAws(t *testing.T) {
+runTerraformAndVerify(t, "../rancher/aws")
}
-func TestE2E_Azure(t *testing.T) {
-runTerraformAndVerify(t, "../azure")
+func TestE2E_RancherAzure(t *testing.T) {
+runTerraformAndVerify(t, "../rancher/azure")
}
-func TestE2E_Do(t *testing.T) {
-runTerraformAndVerify(t, "../do")
+func TestE2E_RancherDo(t *testing.T) {
+runTerraformAndVerify(t, "../rancher/do")
}
-func TestE2E_Gcp(t *testing.T) {
-runTerraformAndVerify(t, "../gcp")
+func TestE2E_RancherGcp(t *testing.T) {
+runTerraformAndVerify(t, "../rancher/gcp")
}
-func TestE2E_Hcloud(t *testing.T) {
-runTerraformAndVerify(t, "../hcloud")
+func TestE2E_RancherHcloud(t *testing.T) {
+runTerraformAndVerify(t, "../rancher/hcloud")
}
-func TestE2E_Linode(t *testing.T) {
-runTerraformAndVerify(t, "../linode")
+func TestE2E_RancherLinode(t *testing.T) {
+runTerraformAndVerify(t, "../rancher/linode")
}
-func TestE2E_Scw(t *testing.T) {
-runTerraformAndVerify(t, "../scw")
+func TestE2E_RancherScw(t *testing.T) {
+runTerraformAndVerify(t, "../rancher/scw")
}
+func TestE2E_NeuVectorAws(t *testing.T) {
+runTerraformAndVerify(t, "../neuvector/aws")
+}
func runTerraformAndVerify(t *testing.T, terraformDir string) {