diff --git a/.github/workflows/build_awshelper.yaml b/.github/workflows/build_awshelper.yaml
new file mode 100644
index 000000000..36b5745db
--- /dev/null
+++ b/.github/workflows/build_awshelper.yaml
@@ -0,0 +1,19 @@
+name: Build awshelper image
+
+# Always build this image because it contains all the cloud-automation files.
+# Some jobs depend on arbitrary files, so we need to test them with updated awshelper images.
+on: push
+
+jobs:
+ awshelper:
+ name: awshelper
+ uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master
+ with:
+ DOCKERFILE_LOCATION: "./Docker/awshelper/Dockerfile"
+ OVERRIDE_REPO_NAME: "awshelper"
+ secrets:
+ ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }}
+ ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }}
+ QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }}
+ QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }}
+
diff --git a/.github/workflows/build_python3.10.yaml b/.github/workflows/build_python3.10.yaml
new file mode 100644
index 000000000..80d2d7623
--- /dev/null
+++ b/.github/workflows/build_python3.10.yaml
@@ -0,0 +1,23 @@
+name: Build Python 3.10 image
+
+on:
+ push:
+ paths:
+ - .github/workflows/build_python3.10.yaml
+ - Docker/python-nginx/python3.10-buster/**
+
+jobs:
+ python_3-10:
+ name: Python 3.10
+ uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master
+ with:
+ DOCKERFILE_LOCATION: "./Docker/python-nginx/python3.10-buster/Dockerfile"
+ DOCKERFILE_BUILD_CONTEXT: "./Docker/python-nginx/python3.10-buster"
+ OVERRIDE_REPO_NAME: "python"
+ OVERRIDE_TAG_NAME: "python3.10-buster-$(echo ${GITHUB_REF#refs/*/} | tr / _)"
+ secrets:
+ ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }}
+ ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }}
+ QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }}
+ QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }}
+
diff --git a/.github/workflows/build_python3.9.yaml b/.github/workflows/build_python3.9.yaml
new file mode 100644
index 000000000..540e0d4ec
--- /dev/null
+++ b/.github/workflows/build_python3.9.yaml
@@ -0,0 +1,23 @@
+name: Build Python 3.9 image
+
+on:
+ push:
+ paths:
+ - .github/workflows/build_python3.9.yaml
+ - Docker/python-nginx/python3.9-buster/**
+
+jobs:
+ python_3-9:
+ name: Python 3.9
+ uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master
+ with:
+ DOCKERFILE_LOCATION: "./Docker/python-nginx/python3.9-buster/Dockerfile"
+ DOCKERFILE_BUILD_CONTEXT: "./Docker/python-nginx/python3.9-buster"
+ OVERRIDE_REPO_NAME: "python"
+ OVERRIDE_TAG_NAME: "python3.9-buster-$(echo ${GITHUB_REF#refs/*/} | tr / _)"
+ secrets:
+ ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }}
+ ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }}
+ QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }}
+ QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }}
+
diff --git a/.github/workflows/image_build_push.yaml b/.github/workflows/image_build_push.yaml
deleted file mode 100644
index 51543f0fe..000000000
--- a/.github/workflows/image_build_push.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
-name: Build Python Base Images and Push to Quay and ECR
-
-on: push
-
-jobs:
- python_3-9:
- name: Python 3.9 Build and Push
- uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master
- with:
- DOCKERFILE_LOCATION: "./Docker/python-nginx/python3.9-buster/Dockerfile"
- DOCKERFILE_BUILD_CONTEXT: "./Docker/python-nginx/python3.9-buster"
- OVERRIDE_REPO_NAME: "python"
- OVERRIDE_TAG_NAME: "python3.9-buster-$(echo ${GITHUB_REF#refs/*/} | tr / _)"
- secrets:
- ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }}
- ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }}
- QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }}
- QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }}
- python_3-10:
- name: Python 3.10 Build and Push
- uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master
- with:
- DOCKERFILE_LOCATION: "./Docker/python-nginx/python3.10-buster/Dockerfile"
- DOCKERFILE_BUILD_CONTEXT: "./Docker/python-nginx/python3.10-buster"
- OVERRIDE_REPO_NAME: "python"
- OVERRIDE_TAG_NAME: "python3.10-buster-$(echo ${GITHUB_REF#refs/*/} | tr / _)"
- secrets:
- ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }}
- ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }}
- QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }}
- QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }}
- awshelper:
- name: AwsHelper Build and Push
- uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master
- with:
- DOCKERFILE_LOCATION: "./Docker/awshelper/Dockerfile"
- OVERRIDE_REPO_NAME: "awshelper"
- secrets:
- ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }}
- ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }}
- QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }}
- QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }}
diff --git a/.github/workflows/image_build_push_jenkins.yaml b/.github/workflows/image_build_push_jenkins.yaml
index ffea50ace..094417fe5 100644
--- a/.github/workflows/image_build_push_jenkins.yaml
+++ b/.github/workflows/image_build_push_jenkins.yaml
@@ -1,58 +1,63 @@
-name: Build Jenkins images and push to Quay
+name: Build Jenkins images
on:
push:
paths:
+ - .github/workflows/image_build_push_jenkins.yaml
- Docker/jenkins/**
jobs:
jenkins:
- name: Jenkins Build and Push
+ name: Jenkins
uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master
with:
DOCKERFILE_LOCATION: "./Docker/jenkins/Jenkins/Dockerfile"
DOCKERFILE_BUILD_CONTEXT: "./Docker/jenkins/Jenkins"
OVERRIDE_REPO_NAME: "jenkins"
USE_QUAY_ONLY: true
+ BUILD_PLATFORMS: "linux/amd64"
secrets:
ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }}
ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }}
QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }}
QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }}
jenkins2:
- name: Jenkins2 Build and Push
+ name: Jenkins2
uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master
with:
DOCKERFILE_LOCATION: "./Docker/jenkins/Jenkins2/Dockerfile"
DOCKERFILE_BUILD_CONTEXT: "./Docker/jenkins/Jenkins2"
OVERRIDE_REPO_NAME: "jenkins2"
USE_QUAY_ONLY: true
+ BUILD_PLATFORMS: "linux/amd64"
secrets:
ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }}
ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }}
QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }}
QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }}
jenkins-ci-worker:
- name: Jenkins-CI-Worker Build and Push
+ name: Jenkins-CI-Worker
uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master
with:
DOCKERFILE_LOCATION: "./Docker/jenkins/Jenkins-CI-Worker/Dockerfile"
DOCKERFILE_BUILD_CONTEXT: "./Docker/jenkins/Jenkins-CI-Worker"
OVERRIDE_REPO_NAME: "gen3-ci-worker"
USE_QUAY_ONLY: true
+ BUILD_PLATFORMS: "linux/amd64"
secrets:
ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }}
ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }}
QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }}
QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }}
jenkins-qa-worker:
- name: Jenkins-QA-Worker Build and Push
+ name: Jenkins-QA-Worker
uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master
with:
DOCKERFILE_LOCATION: "./Docker/jenkins/Jenkins-Worker/Dockerfile"
DOCKERFILE_BUILD_CONTEXT: "./Docker/jenkins/Jenkins-Worker"
OVERRIDE_REPO_NAME: "gen3-qa-worker"
USE_QUAY_ONLY: true
+ BUILD_PLATFORMS: "linux/amd64"
secrets:
ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }}
ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }}
diff --git a/.github/workflows/image_build_push_squid.yaml b/.github/workflows/image_build_push_squid.yaml
new file mode 100644
index 000000000..ce1761d3c
--- /dev/null
+++ b/.github/workflows/image_build_push_squid.yaml
@@ -0,0 +1,22 @@
+name: Build Squid images
+
+on:
+ push:
+ paths:
+ - .github/workflows/image_build_push_squid.yaml
+ - Docker/squid/**
+
+jobs:
+ squid:
+ name: Squid image
+ uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master
+ with:
+ DOCKERFILE_LOCATION: "./Docker/squid/Dockerfile"
+ DOCKERFILE_BUILD_CONTEXT: "./Docker/squid"
+ OVERRIDE_REPO_NAME: "squid"
+ USE_QUAY_ONLY: true
+ secrets:
+ ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }}
+ ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }}
+ QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }}
+ QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }}
diff --git a/.gitignore b/.gitignore
index dbce5bd82..299bdc807 100644
--- a/.gitignore
+++ b/.gitignore
@@ -14,6 +14,7 @@ terraform
*~
*.swp
.DS_Store
+.dccache
kube/services/fluentd/varlogs/
kube/services/fluentd/dockerlogs/
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 2e3ce795b..82034495d 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,6 +1,6 @@
repos:
- repo: git@github.com:Yelp/detect-secrets
- rev: v0.13.1
+ rev: v1.4.0
hooks:
- id: detect-secrets
args: ['--baseline', '.secrets.baseline']
diff --git a/.secrets.baseline b/.secrets.baseline
index 8d7d9afb8..0c4eba0a8 100644
--- a/.secrets.baseline
+++ b/.secrets.baseline
@@ -1,19 +1,18 @@
{
- "exclude": {
- "files": "^.secrets.baseline$",
- "lines": null
- },
- "generated_at": "2022-11-17T21:04:51Z",
+ "version": "1.4.0",
"plugins_used": [
+ {
+ "name": "ArtifactoryDetector"
+ },
{
"name": "AWSKeyDetector"
},
{
- "name": "ArtifactoryDetector"
+ "name": "AzureStorageKeyDetector"
},
{
- "base64_limit": 4.5,
- "name": "Base64HighEntropyString"
+ "name": "Base64HighEntropyString",
+ "limit": 4.5
},
{
"name": "BasicAuthDetector"
@@ -22,8 +21,14 @@
"name": "CloudantDetector"
},
{
- "hex_limit": 3,
- "name": "HexHighEntropyString"
+ "name": "DiscordBotTokenDetector"
+ },
+ {
+ "name": "GitHubTokenDetector"
+ },
+ {
+ "name": "HexHighEntropyString",
+ "limit": 3.0
},
{
"name": "IbmCloudIamDetector"
@@ -35,21 +40,30 @@
"name": "JwtTokenDetector"
},
{
- "keyword_exclude": null,
- "name": "KeywordDetector"
+ "name": "KeywordDetector",
+ "keyword_exclude": ""
},
{
"name": "MailchimpDetector"
},
+ {
+ "name": "NpmDetector"
+ },
{
"name": "PrivateKeyDetector"
},
+ {
+ "name": "SendGridDetector"
+ },
{
"name": "SlackDetector"
},
{
"name": "SoftlayerDetector"
},
+ {
+ "name": "SquareOAuthDetector"
+ },
{
"name": "StripeDetector"
},
@@ -57,2451 +71,3671 @@
"name": "TwilioKeyDetector"
}
],
+ "filters_used": [
+ {
+ "path": "detect_secrets.filters.allowlist.is_line_allowlisted"
+ },
+ {
+ "path": "detect_secrets.filters.common.is_baseline_file",
+ "filename": ".secrets.baseline"
+ },
+ {
+ "path": "detect_secrets.filters.common.is_ignored_due_to_verification_policies",
+ "min_level": 2
+ },
+ {
+ "path": "detect_secrets.filters.heuristic.is_indirect_reference"
+ },
+ {
+ "path": "detect_secrets.filters.heuristic.is_likely_id_string"
+ },
+ {
+ "path": "detect_secrets.filters.heuristic.is_lock_file"
+ },
+ {
+ "path": "detect_secrets.filters.heuristic.is_not_alphanumeric_string"
+ },
+ {
+ "path": "detect_secrets.filters.heuristic.is_potential_uuid"
+ },
+ {
+ "path": "detect_secrets.filters.heuristic.is_prefixed_with_dollar_sign"
+ },
+ {
+ "path": "detect_secrets.filters.heuristic.is_sequential_string"
+ },
+ {
+ "path": "detect_secrets.filters.heuristic.is_swagger_file"
+ },
+ {
+ "path": "detect_secrets.filters.heuristic.is_templated_secret"
+ }
+ ],
"results": {
"Chef/repo/data_bags/README.md": [
{
- "hashed_secret": "8a9250639e092d90f164792e35073a9395bff366",
- "is_secret": false,
- "is_verified": false,
- "line_number": 45,
- "type": "Secret Keyword"
- },
- {
+ "type": "Secret Keyword",
+ "filename": "Chef/repo/data_bags/README.md",
"hashed_secret": "6367c48dd193d56ea7b0baad25b19455e529f5ee",
- "is_secret": false,
"is_verified": false,
- "line_number": 51,
- "type": "Secret Keyword"
+ "line_number": 38
}
],
- "Docker/Jenkins-CI-Worker/Dockerfile": [
+ "Docker/sidecar/service.key": [
{
- "hashed_secret": "10daf3a26c6a17242a5ab2438a12ebc8276c7603",
- "is_secret": false,
+ "type": "Private Key",
+ "filename": "Docker/sidecar/service.key",
+ "hashed_secret": "1348b145fa1a555461c1b790a2f66614781091e9",
"is_verified": false,
- "line_number": 122,
- "type": "Secret Keyword"
+ "line_number": 1
}
],
- "Docker/Jenkins-Worker/Dockerfile": [
+ "Jenkins/Stacks/Jenkins/jenkins.env.sample": [
{
- "hashed_secret": "10daf3a26c6a17242a5ab2438a12ebc8276c7603",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "Jenkins/Stacks/Jenkins/jenkins.env.sample",
+ "hashed_secret": "f41a52528dd2d592d2c05de5f388101c2948aa98",
"is_verified": false,
- "line_number": 136,
- "type": "Secret Keyword"
+ "line_number": 5
}
],
- "Docker/Jenkins/Dockerfile": [
+ "Jenkinsfile": [
{
- "hashed_secret": "10daf3a26c6a17242a5ab2438a12ebc8276c7603",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "Jenkinsfile",
+ "hashed_secret": "c937b6fbb346a51ef679dd02ac5c4863e02bfdbf",
"is_verified": false,
- "line_number": 110,
- "type": "Secret Keyword"
- }
- ],
- "Docker/Jenkins2/Dockerfile": [
+ "line_number": 144
+ },
{
- "hashed_secret": "10daf3a26c6a17242a5ab2438a12ebc8276c7603",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "Jenkinsfile",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
"is_verified": false,
- "line_number": 113,
- "type": "Secret Keyword"
+ "line_number": 147
}
],
- "Docker/sidecar/service.key": [
+ "ansible/roles/slurm/README.md": [
{
- "hashed_secret": "1348b145fa1a555461c1b790a2f66614781091e9",
- "is_secret": false,
+ "type": "Base64 High Entropy String",
+ "filename": "ansible/roles/slurm/README.md",
+ "hashed_secret": "4acfde1ff9c353ba2ef0dbe0df73bda2743cba42",
"is_verified": false,
- "line_number": 1,
- "type": "Private Key"
+ "line_number": 86
}
],
- "Jenkins/Stacks/Jenkins/jenkins.env.sample": [
+ "apis_configs/fence_settings.py": [
{
- "hashed_secret": "eecee33686ac5861c2a7edc8b46bd0e5432bfddd",
- "is_secret": false,
+ "type": "Basic Auth Credentials",
+ "filename": "apis_configs/fence_settings.py",
+ "hashed_secret": "347cd9c53ff77d41a7b22aa56c7b4efaf54658e3",
"is_verified": false,
- "line_number": 5,
- "type": "Secret Keyword"
+ "line_number": 80
}
],
- "ansible/roles/awslogs/defaults/main.yaml": [
+ "apis_configs/peregrine_settings.py": [
{
- "hashed_secret": "9d4e1e23bd5b727046a9e3b4b7db57bd8d6ee684",
- "is_secret": false,
+ "type": "Basic Auth Credentials",
+ "filename": "apis_configs/peregrine_settings.py",
+ "hashed_secret": "347cd9c53ff77d41a7b22aa56c7b4efaf54658e3",
"is_verified": false,
- "line_number": 30,
- "type": "Basic Auth Credentials"
+ "line_number": 46
}
],
- "ansible/roles/slurm/README.md": [
- {
- "hashed_secret": "4acfde1ff9c353ba2ef0dbe0df73bda2743cba42",
- "is_secret": false,
- "is_verified": false,
- "line_number": 86,
- "type": "Base64 High Entropy String"
- },
+ "apis_configs/sheepdog_settings.py": [
{
- "hashed_secret": "579649582303921502d9e6d3f8755f13fdd2b476",
- "is_secret": false,
+ "type": "Basic Auth Credentials",
+ "filename": "apis_configs/sheepdog_settings.py",
+ "hashed_secret": "347cd9c53ff77d41a7b22aa56c7b4efaf54658e3",
"is_verified": false,
- "line_number": 86,
- "type": "Secret Keyword"
+ "line_number": 46
}
],
- "apis_configs/config_helper.py": [
+ "aws-inspec/kubernetes/chef_inspec-cron.yaml": [
{
- "hashed_secret": "bf21a9e8fbc5a3846fb05b4fa0859e0917b2202f",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "aws-inspec/kubernetes/chef_inspec-cron.yaml",
+ "hashed_secret": "a3ba27250861948a554629a0e21168821ddfa9f1",
"is_verified": false,
- "line_number": 66,
- "type": "Basic Auth Credentials"
+ "line_number": 35
}
],
- "apis_configs/fence_credentials.json": [
+ "doc/api.md": [
{
- "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c",
- "is_secret": false,
+ "type": "Hex High Entropy String",
+ "filename": "doc/api.md",
+ "hashed_secret": "625de83a7517422051911680cc803921ff99db90",
"is_verified": false,
- "line_number": 23,
- "type": "Secret Keyword"
+ "line_number": 47
}
],
- "apis_configs/fence_settings.py": [
+ "doc/gen3OnK8s.md": [
{
- "hashed_secret": "3ef0fb8a603abdc0b6caac44a23fdc6792f77ddf",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "doc/gen3OnK8s.md",
+ "hashed_secret": "55c100ba37d2df35ec1e5f5d6302f060387df6cc",
"is_verified": false,
- "line_number": 6,
- "type": "Basic Auth Credentials"
+ "line_number": 113
},
{
- "hashed_secret": "b60d121b438a380c343d5ec3c2037564b82ffef3",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "doc/gen3OnK8s.md",
+ "hashed_secret": "262d8e9b8ac5f06e7612dfb608f7267f88679801",
"is_verified": false,
- "line_number": 58,
- "type": "Secret Keyword"
+ "line_number": 120
},
{
- "hashed_secret": "347cd9c53ff77d41a7b22aa56c7b4efaf54658e3",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "doc/gen3OnK8s.md",
+ "hashed_secret": "1c17e556736c4d23933f99d199e7c2c572895fd2",
+ "is_verified": false,
+ "line_number": 143
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "doc/gen3OnK8s.md",
+ "hashed_secret": "76a4acaf31b815aa2c41cc2a2176b11fa9edf00a",
+ "is_verified": false,
+ "line_number": 145
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "doc/gen3OnK8s.md",
+ "hashed_secret": "9d678cbce5a343920f754d5836f03346ee01cde5",
"is_verified": false,
- "line_number": 80,
- "type": "Basic Auth Credentials"
+ "line_number": 154
}
],
- "apis_configs/indexd_settings.py": [
+ "files/scripts/psql-fips-fix.sh": [
{
- "hashed_secret": "0a0d18c85e096611b5685b62bc60ec534d19bacc",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "files/scripts/psql-fips-fix.sh",
+ "hashed_secret": "2f1aa1e2a58704b452a5dd60ab1bd2b761bf296a",
"is_verified": false,
- "line_number": 59,
- "type": "Basic Auth Credentials"
+ "line_number": 9
}
],
- "apis_configs/peregrine_settings.py": [
+ "gen3/bin/bucket-manifest.sh": [
{
- "hashed_secret": "347cd9c53ff77d41a7b22aa56c7b4efaf54658e3",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "gen3/bin/bucket-manifest.sh",
+ "hashed_secret": "2be88ca4242c76e8253ac62474851065032d6833",
"is_verified": false,
- "line_number": 46,
- "type": "Basic Auth Credentials"
+ "line_number": 58
}
],
- "apis_configs/sheepdog_settings.py": [
+ "gen3/bin/bucket-replicate.sh": [
{
- "hashed_secret": "347cd9c53ff77d41a7b22aa56c7b4efaf54658e3",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "gen3/bin/bucket-replicate.sh",
+ "hashed_secret": "2be88ca4242c76e8253ac62474851065032d6833",
"is_verified": false,
- "line_number": 46,
- "type": "Basic Auth Credentials"
+ "line_number": 39
}
],
- "doc/Gen3-data-upload.md": [
+ "gen3/bin/secrets.sh": [
{
- "hashed_secret": "b8bd20d4a2701dc3aba0efbbf325f1359392d93e",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "gen3/bin/secrets.sh",
+ "hashed_secret": "fb6220478aaba649aac37271a1d7c6317abc03a6",
"is_verified": false,
- "line_number": 26,
- "type": "Secret Keyword"
+ "line_number": 135
}
],
- "doc/api.md": [
+ "gen3/lib/aws.sh": [
{
- "hashed_secret": "625de83a7517422051911680cc803921ff99db90",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "gen3/lib/aws.sh",
+ "hashed_secret": "6b44a330b450ee550c081410c6b705dfeaa105ce",
"is_verified": false,
- "line_number": 47,
- "type": "Hex High Entropy String"
+ "line_number": 640
}
],
- "doc/gen3OnK8s.md": [
+ "gen3/lib/bootstrap/templates/Gen3Secrets/apis_configs/fence-config.yaml": [
{
- "hashed_secret": "2db6d21d365f544f7ca3bcfb443ac96898a7a069",
- "is_secret": false,
+ "type": "Basic Auth Credentials",
+ "filename": "gen3/lib/bootstrap/templates/Gen3Secrets/apis_configs/fence-config.yaml",
+ "hashed_secret": "a94a8fe5ccb19ba61c4c0873d391e987982fbbd3",
"is_verified": false,
- "line_number": 113,
- "type": "Secret Keyword"
- },
+ "line_number": 33
+ }
+ ],
+ "gen3/lib/bootstrap/templates/cdis-manifest/manifests/sower/sower.json": [
{
- "hashed_secret": "ff9ee043d85595eb255c05dfe32ece02a53efbb2",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "gen3/lib/bootstrap/templates/cdis-manifest/manifests/sower/sower.json",
+ "hashed_secret": "0447a636536df0264b2000403fbefd69f603ceb1",
"is_verified": false,
- "line_number": 143,
- "type": "Secret Keyword"
+ "line_number": 54
},
{
- "hashed_secret": "70374248fd7129088fef42b8f568443f6dce3a48",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "gen3/lib/bootstrap/templates/cdis-manifest/manifests/sower/sower.json",
+ "hashed_secret": "ca253d1c9dece2da0d6fb24ded7bdb849a475966",
"is_verified": false,
- "line_number": 170,
- "type": "Secret Keyword"
+ "line_number": 60
},
{
- "hashed_secret": "bcf22dfc6fb76b7366b1f1675baf2332a0e6a7ce",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "gen3/lib/bootstrap/templates/cdis-manifest/manifests/sower/sower.json",
+ "hashed_secret": "79496491225eda4a7be9fcddee2825c85b1535cc",
"is_verified": false,
- "line_number": 189,
- "type": "Secret Keyword"
+ "line_number": 108
}
],
- "doc/kube-setup-data-ingestion-job.md": [
+ "gen3/lib/onprem.sh": [
{
- "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "gen3/lib/onprem.sh",
+ "hashed_secret": "29e52a9bac8f274fa41c51fce9c98eba0dd99cb3",
"is_verified": false,
- "line_number": 30,
- "type": "Secret Keyword"
- }
- ],
- "doc/logs.md": [
+ "line_number": 68
+ },
{
- "hashed_secret": "9addbf544119efa4a64223b649750a510f0d463f",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "gen3/lib/onprem.sh",
+ "hashed_secret": "50f013532a9770a2c2cfdc38b7581dd01df69b70",
"is_verified": false,
- "line_number": 6,
- "type": "Secret Keyword"
+ "line_number": 84
}
],
- "doc/slurm_cluster.md": [
+ "gen3/lib/testData/default/expectedFenceResult.yaml": [
{
- "hashed_secret": "2ace62c1befa19e3ea37dd52be9f6d508c5163e6",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "gen3/lib/testData/default/expectedFenceResult.yaml",
+ "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55",
"is_verified": false,
- "line_number": 184,
- "type": "Secret Keyword"
- }
- ],
- "files/dashboard/usage-reports/package-lock.json": [
+ "line_number": 68
+ },
{
- "hashed_secret": "65ecd0650541b6caecdb6986f1871c2e6a95bdfe",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "gen3/lib/testData/default/expectedFenceResult.yaml",
+ "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb",
"is_verified": false,
- "line_number": 10,
- "type": "Base64 High Entropy String"
+ "line_number": 71
},
{
- "hashed_secret": "e35a49e53bb97044b35cc0e4d963b4ac49e9ac7e",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "gen3/lib/testData/default/expectedFenceResult.yaml",
+ "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634",
"is_verified": false,
- "line_number": 15,
- "type": "Base64 High Entropy String"
- }
- ],
- "gen3/bin/api.sh": [
+ "line_number": 74
+ },
{
- "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "gen3/lib/testData/default/expectedFenceResult.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
"is_verified": false,
- "line_number": 407,
- "type": "Secret Keyword"
+ "line_number": 84
},
{
- "hashed_secret": "e7064f0b80f61dbc65915311032d27baa569ae2a",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "gen3/lib/testData/default/expectedFenceResult.yaml",
+ "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d",
"is_verified": false,
- "line_number": 477,
- "type": "Secret Keyword"
- }
- ],
- "gen3/bin/kube-dev-namespace.sh": [
+ "line_number": 87
+ },
{
- "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "gen3/lib/testData/default/expectedFenceResult.yaml",
+ "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb",
"is_verified": false,
- "line_number": 135,
- "type": "Secret Keyword"
- }
- ],
- "gen3/bin/kube-setup-argo.sh": [
+ "line_number": 90
+ },
{
- "hashed_secret": "40304f287a52d99fdbe086ad19dbdbf9cc1b3897",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "gen3/lib/testData/default/expectedFenceResult.yaml",
+ "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457",
"is_verified": false,
- "line_number": 182,
- "type": "Secret Keyword"
- }
- ],
- "gen3/bin/kube-setup-certs.sh": [
+ "line_number": 93
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "gen3/lib/testData/default/expectedFenceResult.yaml",
+ "hashed_secret": "0849046cdafcdb17f5a4bf5c528430d5e04ad295",
+ "is_verified": false,
+ "line_number": 96
+ },
{
- "hashed_secret": "2e9ee120fd25e31048598693aca91d5473898a99",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "gen3/lib/testData/default/expectedFenceResult.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
"is_verified": false,
- "line_number": 50,
- "type": "Secret Keyword"
+ "line_number": 99
}
],
- "gen3/bin/kube-setup-dashboard.sh": [
+ "gen3/lib/testData/default/expectedSheepdogResult.yaml": [
{
- "hashed_secret": "40304f287a52d99fdbe086ad19dbdbf9cc1b3897",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "gen3/lib/testData/default/expectedSheepdogResult.yaml",
+ "hashed_secret": "ec9c944c51e87322de8d22e3ca9e2be1ad8fee0d",
"is_verified": false,
- "line_number": 40,
- "type": "Secret Keyword"
+ "line_number": 60
},
{
- "hashed_secret": "e7064f0b80f61dbc65915311032d27baa569ae2a",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "gen3/lib/testData/default/expectedSheepdogResult.yaml",
+ "hashed_secret": "79496491225eda4a7be9fcddee2825c85b1535cc",
"is_verified": false,
- "line_number": 41,
- "type": "Secret Keyword"
- }
- ],
- "gen3/bin/kube-setup-data-ingestion-job.sh": [
+ "line_number": 63
+ },
{
- "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "gen3/lib/testData/default/expectedSheepdogResult.yaml",
+ "hashed_secret": "e43756046ad1763d6946575fed0e05130a154bd2",
"is_verified": false,
- "line_number": 37,
- "type": "Secret Keyword"
+ "line_number": 69
},
{
- "hashed_secret": "8695a632956b1b0ea7b66993dcc98732da39148c",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "gen3/lib/testData/default/expectedSheepdogResult.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
"is_verified": false,
- "line_number": 102,
- "type": "Secret Keyword"
+ "line_number": 72
}
],
- "gen3/bin/kube-setup-dicom-server.sh": [
+ "gen3/lib/testData/etlconvert/expected2.yaml": [
{
- "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f",
- "is_secret": false,
+ "type": "Base64 High Entropy String",
+ "filename": "gen3/lib/testData/etlconvert/expected2.yaml",
+ "hashed_secret": "fe54e5e937d642307ec155b47ac8a214cb40d474",
"is_verified": false,
- "line_number": 43,
- "type": "Secret Keyword"
- }
- ],
- "gen3/bin/kube-setup-jenkins.sh": [
+ "line_number": 10
+ },
{
- "hashed_secret": "05ea760643a5c0a9bacb3544dc844ac79938a51f",
- "is_secret": false,
+ "type": "Base64 High Entropy String",
+ "filename": "gen3/lib/testData/etlconvert/expected2.yaml",
+ "hashed_secret": "cea0e701e53c42bede2212b22f58f9ff8324da55",
"is_verified": false,
- "line_number": 18,
- "type": "Secret Keyword"
+ "line_number": 13
},
{
- "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f",
- "is_secret": false,
+ "type": "Base64 High Entropy String",
+ "filename": "gen3/lib/testData/etlconvert/expected2.yaml",
+ "hashed_secret": "d98d72830f08c9a8b96ed11d3d96ae9e71b72a26",
"is_verified": false,
- "line_number": 22,
- "type": "Secret Keyword"
- }
- ],
- "gen3/bin/kube-setup-metadata.sh": [
+ "line_number": 16
+ },
{
- "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd",
- "is_secret": false,
+ "type": "Base64 High Entropy String",
+ "filename": "gen3/lib/testData/etlconvert/expected2.yaml",
+ "hashed_secret": "667fd45d415f73f4132cf0ed11452beb51117b12",
"is_verified": false,
- "line_number": 35,
- "type": "Secret Keyword"
- }
- ],
- "gen3/bin/kube-setup-revproxy.sh": [
+ "line_number": 18
+ },
{
- "hashed_secret": "40304f287a52d99fdbe086ad19dbdbf9cc1b3897",
- "is_secret": false,
+ "type": "Base64 High Entropy String",
+ "filename": "gen3/lib/testData/etlconvert/expected2.yaml",
+ "hashed_secret": "c2599d515ba3be74ed58821485ba769fc565e424",
"is_verified": false,
- "line_number": 38,
- "type": "Secret Keyword"
+ "line_number": 33
},
{
- "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f",
- "is_secret": false,
+ "type": "Base64 High Entropy String",
+ "filename": "gen3/lib/testData/etlconvert/expected2.yaml",
+ "hashed_secret": "6ec5eb29e2884f0c9731493b38902e37c2d672ba",
"is_verified": false,
- "line_number": 55,
- "type": "Secret Keyword"
+ "line_number": 35
},
{
- "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd",
- "is_secret": false,
+ "type": "Base64 High Entropy String",
+ "filename": "gen3/lib/testData/etlconvert/expected2.yaml",
+ "hashed_secret": "99126b74731670a59b663d5320712564ec7b5f22",
"is_verified": false,
- "line_number": 57,
- "type": "Secret Keyword"
+ "line_number": 36
}
],
- "gen3/bin/kube-setup-secrets.sh": [
+ "gen3/lib/testData/etlconvert/users2.yaml": [
{
- "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd",
- "is_secret": false,
+ "type": "Base64 High Entropy String",
+ "filename": "gen3/lib/testData/etlconvert/users2.yaml",
+ "hashed_secret": "cea0e701e53c42bede2212b22f58f9ff8324da55",
"is_verified": false,
- "line_number": 79,
- "type": "Secret Keyword"
+ "line_number": 543
},
{
- "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f",
- "is_secret": false,
+ "type": "Base64 High Entropy String",
+ "filename": "gen3/lib/testData/etlconvert/users2.yaml",
+ "hashed_secret": "d98d72830f08c9a8b96ed11d3d96ae9e71b72a26",
"is_verified": false,
- "line_number": 82,
- "type": "Secret Keyword"
+ "line_number": 553
},
{
- "hashed_secret": "6f7531b95bbc99ac25a5cc82edb825f319c5dee8",
- "is_secret": false,
+ "type": "Base64 High Entropy String",
+ "filename": "gen3/lib/testData/etlconvert/users2.yaml",
+ "hashed_secret": "fe54e5e937d642307ec155b47ac8a214cb40d474",
"is_verified": false,
- "line_number": 95,
- "type": "Secret Keyword"
- }
- ],
- "gen3/bin/kube-setup-sftp.sh": [
+ "line_number": 558
+ },
{
- "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd",
- "is_secret": false,
+ "type": "Base64 High Entropy String",
+ "filename": "gen3/lib/testData/etlconvert/users2.yaml",
+ "hashed_secret": "667fd45d415f73f4132cf0ed11452beb51117b12",
"is_verified": false,
- "line_number": 36,
- "type": "Secret Keyword"
+ "line_number": 568
},
{
- "hashed_secret": "83d11e3aec005a3b9a2077c6800683e202a95af4",
- "is_secret": false,
+ "type": "Base64 High Entropy String",
+ "filename": "gen3/lib/testData/etlconvert/users2.yaml",
+ "hashed_secret": "c2599d515ba3be74ed58821485ba769fc565e424",
"is_verified": false,
- "line_number": 51,
- "type": "Secret Keyword"
- }
- ],
- "gen3/bin/kube-setup-sheepdog.sh": [
+ "line_number": 643
+ },
{
- "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f",
- "is_secret": false,
+ "type": "Base64 High Entropy String",
+ "filename": "gen3/lib/testData/etlconvert/users2.yaml",
+ "hashed_secret": "6ec5eb29e2884f0c9731493b38902e37c2d672ba",
+ "is_verified": false,
+ "line_number": 653
+ },
+ {
+ "type": "Base64 High Entropy String",
+ "filename": "gen3/lib/testData/etlconvert/users2.yaml",
+ "hashed_secret": "99126b74731670a59b663d5320712564ec7b5f22",
"is_verified": false,
- "line_number": 33,
- "type": "Secret Keyword"
+ "line_number": 658
}
],
- "gen3/bin/kube-setup-sower-jobs.sh": [
+ "gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml": [
{
- "hashed_secret": "40304f287a52d99fdbe086ad19dbdbf9cc1b3897",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml",
+ "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55",
"is_verified": false,
- "line_number": 25,
- "type": "Secret Keyword"
+ "line_number": 71
},
{
- "hashed_secret": "e7064f0b80f61dbc65915311032d27baa569ae2a",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml",
+ "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb",
"is_verified": false,
- "line_number": 26,
- "type": "Secret Keyword"
+ "line_number": 74
},
{
- "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml",
+ "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634",
"is_verified": false,
- "line_number": 120,
- "type": "Secret Keyword"
+ "line_number": 77
},
{
- "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
"is_verified": false,
- "line_number": 122,
- "type": "Secret Keyword"
- }
- ],
- "gen3/bin/kube-setup-ssjdispatcher.sh": [
+ "line_number": 87
+ },
{
- "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml",
+ "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d",
"is_verified": false,
- "line_number": 117,
- "type": "Secret Keyword"
+ "line_number": 90
},
{
- "hashed_secret": "7992309146efaa8da936e34b0bd33242cd0e9f93",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml",
+ "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb",
"is_verified": false,
- "line_number": 184,
- "type": "Secret Keyword"
+ "line_number": 93
},
{
- "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml",
+ "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457",
"is_verified": false,
- "line_number": 197,
- "type": "Secret Keyword"
- }
- ],
- "gen3/lib/aws.sh": [
+ "line_number": 96
+ },
{
- "hashed_secret": "8db3b325254b6389ca194d829d2fc923dc0a945d",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml",
+ "hashed_secret": "0849046cdafcdb17f5a4bf5c528430d5e04ad295",
"is_verified": false,
- "line_number": 640,
- "type": "Secret Keyword"
+ "line_number": 99
},
{
- "hashed_secret": "5b4b6c62d3d99d202f095c38c664eded8f640ce8",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
"is_verified": false,
- "line_number": 660,
- "type": "Secret Keyword"
+ "line_number": 102
}
],
- "gen3/lib/bootstrap/templates/Gen3Secrets/apis_configs/fence-config.yaml": [
+ "gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml": [
{
- "hashed_secret": "a94a8fe5ccb19ba61c4c0873d391e987982fbbd3",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml",
+ "hashed_secret": "ec9c944c51e87322de8d22e3ca9e2be1ad8fee0d",
+ "is_verified": false,
+ "line_number": 63
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml",
+ "hashed_secret": "79496491225eda4a7be9fcddee2825c85b1535cc",
+ "is_verified": false,
+ "line_number": 66
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml",
+ "hashed_secret": "e43756046ad1763d6946575fed0e05130a154bd2",
"is_verified": false,
- "line_number": 33,
- "type": "Basic Auth Credentials"
+ "line_number": 72
},
{
- "hashed_secret": "5d07e1b80e448a213b392049888111e1779a52db",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
+ "is_verified": false,
+ "line_number": 75
+ }
+ ],
+ "gen3/test/secretsTest.sh": [
+ {
+ "type": "Secret Keyword",
+ "filename": "gen3/test/secretsTest.sh",
+ "hashed_secret": "c2c715092ef59cba22520f109f041efca84b8938",
"is_verified": false,
- "line_number": 286,
- "type": "Secret Keyword"
+ "line_number": 25
}
],
- "gen3/lib/bootstrap/templates/Gen3Secrets/creds.json": [
+ "gen3/test/terraformTest.sh": [
+ {
+ "type": "Secret Keyword",
+ "filename": "gen3/test/terraformTest.sh",
+ "hashed_secret": "6b44a330b450ee550c081410c6b705dfeaa105ce",
+ "is_verified": false,
+ "line_number": 156
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "gen3/test/terraformTest.sh",
+ "hashed_secret": "d869db7fe62fb07c25a0403ecaea55031744b5fb",
+ "is_verified": false,
+ "line_number": 163
+ },
+ {
+ "type": "Base64 High Entropy String",
+ "filename": "gen3/test/terraformTest.sh",
+ "hashed_secret": "1cc07dccfdf640eb0e403e490a873a5536759009",
+ "is_verified": false,
+ "line_number": 172
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "gen3/test/terraformTest.sh",
+ "hashed_secret": "1cc07dccfdf640eb0e403e490a873a5536759009",
+ "is_verified": false,
+ "line_number": 172
+ },
+ {
+ "type": "Base64 High Entropy String",
+ "filename": "gen3/test/terraformTest.sh",
+ "hashed_secret": "185a71a740ef6b9b21c84e6eaa47b89c7de181ef",
+ "is_verified": false,
+ "line_number": 175
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "gen3/test/terraformTest.sh",
+ "hashed_secret": "185a71a740ef6b9b21c84e6eaa47b89c7de181ef",
+ "is_verified": false,
+ "line_number": 175
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "gen3/test/terraformTest.sh",
+ "hashed_secret": "212e1d3823c8c9af9e4c0c172164ee292b9a6768",
+ "is_verified": false,
+ "line_number": 311
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "gen3/test/terraformTest.sh",
+ "hashed_secret": "cb80dbb67a1a5bdf4957eea1473789f1c65357c6",
+ "is_verified": false,
+ "line_number": 312
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "gen3/test/terraformTest.sh",
+ "hashed_secret": "5f35c25f4bf588b5fad46e249fcd9221f5257ce4",
+ "is_verified": false,
+ "line_number": 313
+ },
{
- "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "gen3/test/terraformTest.sh",
+ "hashed_secret": "5308421b43dde5775f1993bd25a8163070d65598",
"is_verified": false,
- "line_number": 26,
- "type": "Secret Keyword"
+ "line_number": 314
}
],
- "gen3/lib/bootstrap/templates/Gen3Secrets/g3auto/dbfarm/servers.json": [
+ "kube/services/access-backend/access-backend-deploy.yaml": [
{
- "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/access-backend/access-backend-deploy.yaml",
+ "hashed_secret": "dbf88a0c3d905c669c0fd13bf8172bb34d4b1168",
"is_verified": false,
- "line_number": 5,
- "type": "Secret Keyword"
+ "line_number": 60
}
],
- "gen3/lib/logs/utils.sh": [
+ "kube/services/acronymbot/acronymbot-deploy.yaml": [
{
- "hashed_secret": "76143b4ffc8aa2a53f9700ce229f904e69f1e8b5",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/acronymbot/acronymbot-deploy.yaml",
+ "hashed_secret": "600833390a6b9891d0d8a5f6e3326abb237ac8ca",
"is_verified": false,
- "line_number": 3,
- "type": "Secret Keyword"
+ "line_number": 49
}
],
- "gen3/lib/manifestDefaults/hatchery/hatchery.json": [
+ "kube/services/arborist/arborist-deploy-2.yaml": [
{
- "hashed_secret": "0da0e0005ca04acb407af2681d0bede6d9406039",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/arborist/arborist-deploy-2.yaml",
+ "hashed_secret": "6c57cdfdaaf3cde7a1da6aa94c7d8e46502c4bab",
"is_verified": false,
- "line_number": 78,
- "type": "Secret Keyword"
+ "line_number": 59
}
],
- "gen3/lib/onprem.sh": [
+ "kube/services/arborist/arborist-deploy.yaml": [
{
- "hashed_secret": "29e52a9bac8f274fa41c51fce9c98eba0dd99cb3",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/arborist/arborist-deploy.yaml",
+ "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55",
"is_verified": false,
- "line_number": 68,
- "type": "Secret Keyword"
+ "line_number": 64
},
{
- "hashed_secret": "50f013532a9770a2c2cfdc38b7581dd01df69b70",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/arborist/arborist-deploy.yaml",
+ "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb",
"is_verified": false,
- "line_number": 84,
- "type": "Secret Keyword"
- }
- ],
- "gen3/lib/secrets/rotate-postgres.sh": [
+ "line_number": 67
+ },
{
- "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/arborist/arborist-deploy.yaml",
+ "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634",
"is_verified": false,
- "line_number": 162,
- "type": "Secret Keyword"
+ "line_number": 70
},
{
- "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/arborist/arborist-deploy.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
"is_verified": false,
- "line_number": 250,
- "type": "Secret Keyword"
- }
- ],
- "gen3/lib/testData/etlconvert/expected2.yaml": [
+ "line_number": 77
+ },
{
- "hashed_secret": "fe54e5e937d642307ec155b47ac8a214cb40d474",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/arborist/arborist-deploy.yaml",
+ "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d",
"is_verified": false,
- "line_number": 10,
- "type": "Base64 High Entropy String"
+ "line_number": 80
},
{
- "hashed_secret": "cea0e701e53c42bede2212b22f58f9ff8324da55",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/arborist/arborist-deploy.yaml",
+ "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb",
"is_verified": false,
- "line_number": 13,
- "type": "Base64 High Entropy String"
+ "line_number": 83
},
{
- "hashed_secret": "d98d72830f08c9a8b96ed11d3d96ae9e71b72a26",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/arborist/arborist-deploy.yaml",
+ "hashed_secret": "ea73fcfdaa415890d5fde24d3b2245671be32f73",
+ "is_verified": false,
+ "line_number": 86
+ }
+ ],
+ "kube/services/argo/workflows/fence-usersync-wf.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/argo/workflows/fence-usersync-wf.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
"is_verified": false,
- "line_number": 16,
- "type": "Base64 High Entropy String"
+ "line_number": 108
},
{
- "hashed_secret": "667fd45d415f73f4132cf0ed11452beb51117b12",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/argo/workflows/fence-usersync-wf.yaml",
+ "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634",
"is_verified": false,
- "line_number": 18,
- "type": "Base64 High Entropy String"
+ "line_number": 111
},
{
- "hashed_secret": "c2599d515ba3be74ed58821485ba769fc565e424",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/argo/workflows/fence-usersync-wf.yaml",
+ "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d",
"is_verified": false,
- "line_number": 33,
- "type": "Base64 High Entropy String"
+ "line_number": 114
},
{
- "hashed_secret": "6ec5eb29e2884f0c9731493b38902e37c2d672ba",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/argo/workflows/fence-usersync-wf.yaml",
+ "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb",
"is_verified": false,
- "line_number": 35,
- "type": "Base64 High Entropy String"
+ "line_number": 117
},
{
- "hashed_secret": "99126b74731670a59b663d5320712564ec7b5f22",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/argo/workflows/fence-usersync-wf.yaml",
+ "hashed_secret": "ea73fcfdaa415890d5fde24d3b2245671be32f73",
"is_verified": false,
- "line_number": 36,
- "type": "Base64 High Entropy String"
+ "line_number": 120
}
],
- "gen3/test/secretsTest.sh": [
+ "kube/services/argocd/values.yaml": [
{
- "hashed_secret": "c2c715092ef59cba22520f109f041efca84b8938",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/argocd/values.yaml",
+ "hashed_secret": "bfc1b86ce643b65bd540989213254b01fd6ad418",
"is_verified": false,
- "line_number": 25,
- "type": "Secret Keyword"
+ "line_number": 1489
}
],
- "gen3/test/terraformTest.sh": [
+ "kube/services/arranger/arranger-deploy.yaml": [
{
- "hashed_secret": "8db3b325254b6389ca194d829d2fc923dc0a945d",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/arranger/arranger-deploy.yaml",
+ "hashed_secret": "0db22b31c9add2d3c76743c0ac6fbc99bb8b4761",
"is_verified": false,
- "line_number": 156,
- "type": "Secret Keyword"
+ "line_number": 61
},
{
- "hashed_secret": "1cc07dccfdf640eb0e403e490a873a5536759009",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/arranger/arranger-deploy.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
"is_verified": false,
- "line_number": 172,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 64
+ }
+ ],
+ "kube/services/audit-service/audit-service-deploy.yaml": [
{
- "hashed_secret": "185a71a740ef6b9b21c84e6eaa47b89c7de181ef",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/audit-service/audit-service-deploy.yaml",
+ "hashed_secret": "42cde1c58c36d8bb5804a076e55ac6ec07ef99fc",
+ "is_verified": false,
+ "line_number": 64
+ }
+ ],
+ "kube/services/aws-es-proxy/aws-es-proxy-deploy.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/aws-es-proxy/aws-es-proxy-deploy.yaml",
+ "hashed_secret": "7f834ccb442433fc12ec9532f75c3a4b6a748d4c",
+ "is_verified": false,
+ "line_number": 46
+ }
+ ],
+ "kube/services/cedar-wrapper/cedar-wrapper-deploy.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/cedar-wrapper/cedar-wrapper-deploy.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
"is_verified": false,
- "line_number": 175,
- "type": "Base64 High Entropy String"
+ "line_number": 56
},
{
- "hashed_secret": "329b7cd8191942bedd337107934d365c43a86e6c",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/cedar-wrapper/cedar-wrapper-deploy.yaml",
+ "hashed_secret": "5949b79e0c7082dc78d543cde662871a4f8b8913",
"is_verified": false,
- "line_number": 175,
- "type": "Secret Keyword"
+ "line_number": 59
}
],
- "kube/services/datadog/values.yaml": [
+ "kube/services/cogwheel/cogwheel-deploy.yaml": [
{
- "hashed_secret": "52330dffa4d0795b4199a66428e54eca228e1661",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/cogwheel/cogwheel-deploy.yaml",
+ "hashed_secret": "09b772df628fd10bca646b6a877eb661122210ab",
"is_verified": false,
- "line_number": 20,
- "type": "Secret Keyword"
+ "line_number": 35
}
],
- "kube/services/fenceshib/fenceshib-configmap.yaml": [
+ "kube/services/cohort-middleware/cohort-middleware-deploy.yaml": [
{
- "hashed_secret": "a985e14b9d6744a2d04f29347693b55c116e478c",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/cohort-middleware/cohort-middleware-deploy.yaml",
+ "hashed_secret": "bf22f6c4bd03572f1ef593efc3eb1a7e0b6dcab4",
"is_verified": false,
- "line_number": 375,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 62
+ }
+ ],
+ "kube/services/dashboard/dashboard-deploy.yaml": [
{
- "hashed_secret": "adc747bc5eb82ef4b017f5c3759dcee5aa28c36f",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/dashboard/dashboard-deploy.yaml",
+ "hashed_secret": "9e722d12ce045c8718ab803ed465b2fbe199f3d3",
"is_verified": false,
- "line_number": 376,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 61
+ }
+ ],
+ "kube/services/datadog/values.yaml": [
{
- "hashed_secret": "59b1702ff0eaf92c9271cbd12f587de97df7e13b",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/datadog/values.yaml",
+ "hashed_secret": "4a8ce7ae6a8a7f2624e232b61b18c2ac9789c44b",
"is_verified": false,
- "line_number": 377,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 23
+ }
+ ],
+ "kube/services/datasim/datasim-deploy.yaml": [
{
- "hashed_secret": "b4a748bbfbbca8925d932a47ab3dcb970d34caf5",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/datasim/datasim-deploy.yaml",
+ "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55",
"is_verified": false,
- "line_number": 378,
- "type": "Base64 High Entropy String"
+ "line_number": 63
},
{
- "hashed_secret": "af646701a84f7dd9f0e87753f54def881326e78a",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/datasim/datasim-deploy.yaml",
+ "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634",
"is_verified": false,
- "line_number": 379,
- "type": "Base64 High Entropy String"
+ "line_number": 66
},
{
- "hashed_secret": "20c15ad9742124dc06e1612282c49bb443ebcbd9",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/datasim/datasim-deploy.yaml",
+ "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb",
"is_verified": false,
- "line_number": 380,
- "type": "Base64 High Entropy String"
+ "line_number": 72
},
{
- "hashed_secret": "9caded71b967a11b7a6cd0f20db91f06f3517d12",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/datasim/datasim-deploy.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
"is_verified": false,
- "line_number": 381,
- "type": "Base64 High Entropy String"
+ "line_number": 76
},
{
- "hashed_secret": "8f19501bc9241b71f7b6db929fb35ab12635dcd7",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/datasim/datasim-deploy.yaml",
+ "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457",
"is_verified": false,
- "line_number": 382,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 79
+ }
+ ],
+ "kube/services/dicom-server/dicom-server-deploy.yaml": [
{
- "hashed_secret": "d6220f6a55df1ed11c4250f42ab07bb9da20541a",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/dicom-server/dicom-server-deploy.yaml",
+ "hashed_secret": "706168ac2565a93cceffe2202ac45d3d31c075fb",
"is_verified": false,
- "line_number": 383,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 40
+ }
+ ],
+ "kube/services/fence/fence-canary-deploy.yaml": [
{
- "hashed_secret": "dadd9b96636f9529f2547d05d754dc310ceba0c3",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/fence/fence-canary-deploy.yaml",
+ "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55",
"is_verified": false,
- "line_number": 384,
- "type": "Base64 High Entropy String"
+ "line_number": 68
},
{
- "hashed_secret": "3074bc66584550e20c3697a28f67a0762394943c",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/fence/fence-canary-deploy.yaml",
+ "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb",
"is_verified": false,
- "line_number": 385,
- "type": "Base64 High Entropy String"
+ "line_number": 71
},
{
- "hashed_secret": "823131319b4c4b4688f44d3e832bfa9696f16b52",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/fence/fence-canary-deploy.yaml",
+ "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634",
"is_verified": false,
- "line_number": 386,
- "type": "Base64 High Entropy String"
+ "line_number": 74
},
{
- "hashed_secret": "015b780cbfb76988caf52de8ac974a6781e53110",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/fence/fence-canary-deploy.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
"is_verified": false,
- "line_number": 387,
- "type": "Base64 High Entropy String"
+ "line_number": 84
},
{
- "hashed_secret": "5c8fac33207d74d667680ade09447ea8f43b76d7",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/fence/fence-canary-deploy.yaml",
+ "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d",
"is_verified": false,
- "line_number": 388,
- "type": "Base64 High Entropy String"
+ "line_number": 87
},
{
- "hashed_secret": "c0c4bb09d8394e8f001e337bd27ccac355433d9e",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/fence/fence-canary-deploy.yaml",
+ "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb",
"is_verified": false,
- "line_number": 389,
- "type": "Base64 High Entropy String"
+ "line_number": 90
},
{
- "hashed_secret": "f95631bcbbbc56e18487dcb242cfb1b3e74b16a1",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/fence/fence-canary-deploy.yaml",
+ "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457",
"is_verified": false,
- "line_number": 390,
- "type": "Base64 High Entropy String"
+ "line_number": 93
},
{
- "hashed_secret": "01a692ab6232e0882a313d148981bab58ab98f53",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/fence/fence-canary-deploy.yaml",
+ "hashed_secret": "0849046cdafcdb17f5a4bf5c528430d5e04ad295",
+ "is_verified": false,
+ "line_number": 96
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fence/fence-canary-deploy.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
+ "is_verified": false,
+ "line_number": 99
+ }
+ ],
+ "kube/services/fence/fence-deploy.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fence/fence-deploy.yaml",
+ "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55",
+ "is_verified": false,
+ "line_number": 71
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fence/fence-deploy.yaml",
+ "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb",
+ "is_verified": false,
+ "line_number": 74
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fence/fence-deploy.yaml",
+ "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634",
+ "is_verified": false,
+ "line_number": 77
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fence/fence-deploy.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
+ "is_verified": false,
+ "line_number": 87
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fence/fence-deploy.yaml",
+ "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d",
+ "is_verified": false,
+ "line_number": 90
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fence/fence-deploy.yaml",
+ "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb",
+ "is_verified": false,
+ "line_number": 93
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fence/fence-deploy.yaml",
+ "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457",
+ "is_verified": false,
+ "line_number": 96
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fence/fence-deploy.yaml",
+ "hashed_secret": "0849046cdafcdb17f5a4bf5c528430d5e04ad295",
+ "is_verified": false,
+ "line_number": 99
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fence/fence-deploy.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
+ "is_verified": false,
+ "line_number": 102
+ }
+ ],
+ "kube/services/fenceshib/fenceshib-canary-deploy.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fenceshib/fenceshib-canary-deploy.yaml",
+ "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55",
+ "is_verified": false,
+ "line_number": 62
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fenceshib/fenceshib-canary-deploy.yaml",
+ "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb",
+ "is_verified": false,
+ "line_number": 65
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fenceshib/fenceshib-canary-deploy.yaml",
+ "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634",
+ "is_verified": false,
+ "line_number": 68
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fenceshib/fenceshib-canary-deploy.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
+ "is_verified": false,
+ "line_number": 78
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fenceshib/fenceshib-canary-deploy.yaml",
+ "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d",
+ "is_verified": false,
+ "line_number": 81
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fenceshib/fenceshib-canary-deploy.yaml",
+ "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb",
+ "is_verified": false,
+ "line_number": 84
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fenceshib/fenceshib-canary-deploy.yaml",
+ "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457",
+ "is_verified": false,
+ "line_number": 87
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fenceshib/fenceshib-canary-deploy.yaml",
+ "hashed_secret": "0849046cdafcdb17f5a4bf5c528430d5e04ad295",
+ "is_verified": false,
+ "line_number": 90
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fenceshib/fenceshib-canary-deploy.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
+ "is_verified": false,
+ "line_number": 93
+ }
+ ],
+ "kube/services/fenceshib/fenceshib-deploy.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fenceshib/fenceshib-deploy.yaml",
+ "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55",
+ "is_verified": false,
+ "line_number": 69
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fenceshib/fenceshib-deploy.yaml",
+ "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb",
+ "is_verified": false,
+ "line_number": 72
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fenceshib/fenceshib-deploy.yaml",
+ "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634",
+ "is_verified": false,
+ "line_number": 75
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fenceshib/fenceshib-deploy.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
+ "is_verified": false,
+ "line_number": 85
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fenceshib/fenceshib-deploy.yaml",
+ "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d",
+ "is_verified": false,
+ "line_number": 88
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fenceshib/fenceshib-deploy.yaml",
+ "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb",
+ "is_verified": false,
+ "line_number": 91
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fenceshib/fenceshib-deploy.yaml",
+ "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457",
+ "is_verified": false,
+ "line_number": 94
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fenceshib/fenceshib-deploy.yaml",
+ "hashed_secret": "0849046cdafcdb17f5a4bf5c528430d5e04ad295",
+ "is_verified": false,
+ "line_number": 97
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fenceshib/fenceshib-deploy.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
+ "is_verified": false,
+ "line_number": 100
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fenceshib/fenceshib-deploy.yaml",
+ "hashed_secret": "6c4789c3be186fd5dcbf06723462ccdd2c86dc37",
+ "is_verified": false,
+ "line_number": 103
+ }
+ ],
+ "kube/services/frontend-framework/frontend-framework-deploy.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/frontend-framework/frontend-framework-deploy.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
+ "is_verified": false,
+ "line_number": 54
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/frontend-framework/frontend-framework-deploy.yaml",
+ "hashed_secret": "6607b403f74e62246fc6a3c938feffc5a34a7e49",
+ "is_verified": false,
+ "line_number": 57
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/frontend-framework/frontend-framework-deploy.yaml",
+ "hashed_secret": "4b0bb3e58651fe56ee23e59aa6a3cb96dc61ddd2",
+ "is_verified": false,
+ "line_number": 60
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/frontend-framework/frontend-framework-deploy.yaml",
+ "hashed_secret": "e3c7565314f404e3883929f003c65a02a80366e9",
+ "is_verified": false,
+ "line_number": 66
+ }
+ ],
+ "kube/services/frontend-framework/frontend-framework-root-deploy.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/frontend-framework/frontend-framework-root-deploy.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
+ "is_verified": false,
+ "line_number": 54
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/frontend-framework/frontend-framework-root-deploy.yaml",
+ "hashed_secret": "6607b403f74e62246fc6a3c938feffc5a34a7e49",
+ "is_verified": false,
+ "line_number": 57
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/frontend-framework/frontend-framework-root-deploy.yaml",
+ "hashed_secret": "4b0bb3e58651fe56ee23e59aa6a3cb96dc61ddd2",
+ "is_verified": false,
+ "line_number": 60
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/frontend-framework/frontend-framework-root-deploy.yaml",
+ "hashed_secret": "e3c7565314f404e3883929f003c65a02a80366e9",
+ "is_verified": false,
+ "line_number": 66
+ }
+ ],
+ "kube/services/gdcapi/gdcapi-deploy.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/gdcapi/gdcapi-deploy.yaml",
+ "hashed_secret": "e8c2f0bacaffbf2f9897217c6770413879945296",
+ "is_verified": false,
+ "line_number": 38
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/gdcapi/gdcapi-deploy.yaml",
+ "hashed_secret": "517cded9f3e3ab79237fde330b97a93f5a943316",
+ "is_verified": false,
+ "line_number": 41
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/gdcapi/gdcapi-deploy.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
+ "is_verified": false,
+ "line_number": 44
+ }
+ ],
+ "kube/services/gen3-discovery-ai/gen3-discovery-ai-deploy.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/gen3-discovery-ai/gen3-discovery-ai-deploy.yaml",
+ "hashed_secret": "38ded89f83435a558169dedb91a38f72d6cebf41",
+ "is_verified": false,
+ "line_number": 27
+ }
+ ],
+ "kube/services/google-sa-validation/google-sa-validation-deploy.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/google-sa-validation/google-sa-validation-deploy.yaml",
+ "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55",
+ "is_verified": false,
+ "line_number": 54
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/google-sa-validation/google-sa-validation-deploy.yaml",
+ "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634",
+ "is_verified": false,
+ "line_number": 57
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/google-sa-validation/google-sa-validation-deploy.yaml",
+ "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb",
+ "is_verified": false,
+ "line_number": 63
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/google-sa-validation/google-sa-validation-deploy.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
+ "is_verified": false,
+ "line_number": 67
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/google-sa-validation/google-sa-validation-deploy.yaml",
+ "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d",
+ "is_verified": false,
+ "line_number": 70
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/google-sa-validation/google-sa-validation-deploy.yaml",
+ "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb",
+ "is_verified": false,
+ "line_number": 73
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/google-sa-validation/google-sa-validation-deploy.yaml",
+ "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457",
+ "is_verified": false,
+ "line_number": 76
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/google-sa-validation/google-sa-validation-deploy.yaml",
+ "hashed_secret": "0849046cdafcdb17f5a4bf5c528430d5e04ad295",
+ "is_verified": false,
+ "line_number": 79
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/google-sa-validation/google-sa-validation-deploy.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
+ "is_verified": false,
+ "line_number": 82
+ }
+ ],
+ "kube/services/guppy/guppy-deploy.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/guppy/guppy-deploy.yaml",
+ "hashed_secret": "0db22b31c9add2d3c76743c0ac6fbc99bb8b4761",
+ "is_verified": false,
+ "line_number": 65
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/guppy/guppy-deploy.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
+ "is_verified": false,
+ "line_number": 68
+ }
+ ],
+ "kube/services/indexd/indexd-canary-deploy.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/indexd/indexd-canary-deploy.yaml",
+ "hashed_secret": "0b701c1fabb6ba47a7d47d455e3696d207014bd3",
+ "is_verified": false,
+ "line_number": 59
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/indexd/indexd-canary-deploy.yaml",
+ "hashed_secret": "aee98a99696237d70b6854ee4c2d9e42bc696039",
+ "is_verified": false,
+ "line_number": 62
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/indexd/indexd-canary-deploy.yaml",
+ "hashed_secret": "bdecca54d39013d43d3b7f05f2927eaa7df375dc",
+ "is_verified": false,
+ "line_number": 68
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/indexd/indexd-canary-deploy.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
+ "is_verified": false,
+ "line_number": 71
+ }
+ ],
+ "kube/services/indexd/indexd-deploy.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/indexd/indexd-deploy.yaml",
+ "hashed_secret": "0b701c1fabb6ba47a7d47d455e3696d207014bd3",
+ "is_verified": false,
+ "line_number": 63
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/indexd/indexd-deploy.yaml",
+ "hashed_secret": "aee98a99696237d70b6854ee4c2d9e42bc696039",
+ "is_verified": false,
+ "line_number": 66
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/indexd/indexd-deploy.yaml",
+ "hashed_secret": "bdecca54d39013d43d3b7f05f2927eaa7df375dc",
+ "is_verified": false,
+ "line_number": 72
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/indexd/indexd-deploy.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
+ "is_verified": false,
+ "line_number": 75
+ }
+ ],
+ "kube/services/jenkins-ci-worker/jenkins-ci-worker-deploy.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jenkins-ci-worker/jenkins-ci-worker-deploy.yaml",
+ "hashed_secret": "c937b6fbb346a51ef679dd02ac5c4863e02bfdbf",
+ "is_verified": false,
+ "line_number": 143
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jenkins-ci-worker/jenkins-ci-worker-deploy.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
+ "is_verified": false,
+ "line_number": 146
+ }
+ ],
+ "kube/services/jenkins-worker/jenkins-worker-deploy.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jenkins-worker/jenkins-worker-deploy.yaml",
+ "hashed_secret": "c937b6fbb346a51ef679dd02ac5c4863e02bfdbf",
+ "is_verified": false,
+ "line_number": 150
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jenkins-worker/jenkins-worker-deploy.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
+ "is_verified": false,
+ "line_number": 153
+ }
+ ],
+ "kube/services/jenkins/jenkins-deploy.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jenkins/jenkins-deploy.yaml",
+ "hashed_secret": "c937b6fbb346a51ef679dd02ac5c4863e02bfdbf",
+ "is_verified": false,
+ "line_number": 157
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jenkins/jenkins-deploy.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
+ "is_verified": false,
+ "line_number": 160
+ }
+ ],
+ "kube/services/jenkins2-ci-worker/jenkins2-ci-worker-deploy.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jenkins2-ci-worker/jenkins2-ci-worker-deploy.yaml",
+ "hashed_secret": "c937b6fbb346a51ef679dd02ac5c4863e02bfdbf",
+ "is_verified": false,
+ "line_number": 143
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jenkins2-ci-worker/jenkins2-ci-worker-deploy.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
+ "is_verified": false,
+ "line_number": 146
+ }
+ ],
+ "kube/services/jenkins2-worker/jenkins2-worker-deploy.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jenkins2-worker/jenkins2-worker-deploy.yaml",
+ "hashed_secret": "c937b6fbb346a51ef679dd02ac5c4863e02bfdbf",
+ "is_verified": false,
+ "line_number": 146
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jenkins2-worker/jenkins2-worker-deploy.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
+ "is_verified": false,
+ "line_number": 149
+ }
+ ],
+ "kube/services/jenkins2/jenkins2-deploy.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jenkins2/jenkins2-deploy.yaml",
+ "hashed_secret": "c937b6fbb346a51ef679dd02ac5c4863e02bfdbf",
+ "is_verified": false,
+ "line_number": 153
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jenkins2/jenkins2-deploy.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
+ "is_verified": false,
+ "line_number": 156
+ }
+ ],
+ "kube/services/jobs/arborist-rm-expired-access-cronjob.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/arborist-rm-expired-access-cronjob.yaml",
+ "hashed_secret": "6c57cdfdaaf3cde7a1da6aa94c7d8e46502c4bab",
+ "is_verified": false,
+ "line_number": 37
+ }
+ ],
+ "kube/services/jobs/arborist-rm-expired-access-job.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/arborist-rm-expired-access-job.yaml",
+ "hashed_secret": "6c57cdfdaaf3cde7a1da6aa94c7d8e46502c4bab",
+ "is_verified": false,
+ "line_number": 37
+ }
+ ],
+ "kube/services/jobs/arboristdb-create-job.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/arboristdb-create-job.yaml",
+ "hashed_secret": "6c57cdfdaaf3cde7a1da6aa94c7d8e46502c4bab",
+ "is_verified": false,
+ "line_number": 33
+ }
+ ],
+ "kube/services/jobs/aws-bucket-replicate-job.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/aws-bucket-replicate-job.yaml",
+ "hashed_secret": "deb02468778f4041fb189654698ac948e436732d",
+ "is_verified": false,
+ "line_number": 33
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/aws-bucket-replicate-job.yaml",
+ "hashed_secret": "abe72fcb190ed9c73eb20e198c73a97605b95063",
+ "is_verified": false,
+ "line_number": 36
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/aws-bucket-replicate-job.yaml",
+ "hashed_secret": "ca3cdac59f2bfa45cb014190e4509bf6becf28fb",
+ "is_verified": false,
+ "line_number": 42
+ }
+ ],
+ "kube/services/jobs/bucket-manifest-job.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/bucket-manifest-job.yaml",
+ "hashed_secret": "6c36710fe8825b381388d7005f2c9b5c70175fba",
+ "is_verified": false,
+ "line_number": 33
+ }
+ ],
+ "kube/services/jobs/bucket-replicate-job.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/bucket-replicate-job.yaml",
+ "hashed_secret": "84954f7729144580d612cbb0517aeca8880e3483",
+ "is_verified": false,
+ "line_number": 46
+ }
+ ],
+ "kube/services/jobs/bucket-replication-job.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/bucket-replication-job.yaml",
+ "hashed_secret": "84954f7729144580d612cbb0517aeca8880e3483",
+ "is_verified": false,
+ "line_number": 32
+ }
+ ],
+ "kube/services/jobs/bucket-size-report-job.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/bucket-size-report-job.yaml",
+ "hashed_secret": "7cccf62cb63863d9d3baabed4f576eb0f7039735",
+ "is_verified": false,
+ "line_number": 34
+ }
+ ],
+ "kube/services/jobs/cedar-ingestion-job.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/cedar-ingestion-job.yaml",
+ "hashed_secret": "e1c426d126dcc618dcd0686fc718d509ca6ee3b8",
+ "is_verified": false,
+ "line_number": 54
+ }
+ ],
+ "kube/services/jobs/client-modify-job.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/client-modify-job.yaml",
+ "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634",
+ "is_verified": false,
+ "line_number": 41
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/client-modify-job.yaml",
+ "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55",
+ "is_verified": false,
+ "line_number": 44
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/client-modify-job.yaml",
+ "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb",
+ "is_verified": false,
+ "line_number": 50
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/client-modify-job.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
+ "is_verified": false,
+ "line_number": 54
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/client-modify-job.yaml",
+ "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d",
+ "is_verified": false,
+ "line_number": 57
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/client-modify-job.yaml",
+ "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb",
+ "is_verified": false,
+ "line_number": 60
+ }
+ ],
+ "kube/services/jobs/cogwheel-register-client-job.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/cogwheel-register-client-job.yaml",
+ "hashed_secret": "09b772df628fd10bca646b6a877eb661122210ab",
+ "is_verified": false,
+ "line_number": 40
+ }
+ ],
+ "kube/services/jobs/config-fence-job.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/config-fence-job.yaml",
+ "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634",
+ "is_verified": false,
+ "line_number": 44
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/config-fence-job.yaml",
+ "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55",
+ "is_verified": false,
+ "line_number": 54
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/config-fence-job.yaml",
+ "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb",
+ "is_verified": false,
+ "line_number": 57
+ }
+ ],
+ "kube/services/jobs/covid19-etl-job.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/covid19-etl-job.yaml",
+ "hashed_secret": "a7a2b42615b2b256a7c601c77c426e5d6cafb212",
+ "is_verified": false,
+ "line_number": 34
+ }
+ ],
+ "kube/services/jobs/covid19-notebook-etl-job.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/covid19-notebook-etl-job.yaml",
+ "hashed_secret": "a7a2b42615b2b256a7c601c77c426e5d6cafb212",
+ "is_verified": false,
+ "line_number": 33
+ }
+ ],
+ "kube/services/jobs/data-ingestion-job.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/data-ingestion-job.yaml",
+ "hashed_secret": "81e4388059839f71aed21999aa51095c7e545094",
+ "is_verified": false,
+ "line_number": 34
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/data-ingestion-job.yaml",
+ "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55",
+ "is_verified": false,
+ "line_number": 48
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/data-ingestion-job.yaml",
+ "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb",
+ "is_verified": false,
+ "line_number": 51
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/data-ingestion-job.yaml",
+ "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634",
+ "is_verified": false,
+ "line_number": 54
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/data-ingestion-job.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
+ "is_verified": false,
+ "line_number": 60
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/data-ingestion-job.yaml",
+ "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d",
+ "is_verified": false,
+ "line_number": 63
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/data-ingestion-job.yaml",
+ "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb",
+ "is_verified": false,
+ "line_number": 66
+ }
+ ],
+ "kube/services/jobs/etl-cronjob.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/etl-cronjob.yaml",
+ "hashed_secret": "ca253d1c9dece2da0d6fb24ded7bdb849a475966",
+ "is_verified": false,
+ "line_number": 38
+ }
+ ],
+ "kube/services/jobs/etl-job.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/etl-job.yaml",
+ "hashed_secret": "ca253d1c9dece2da0d6fb24ded7bdb849a475966",
+ "is_verified": false,
+ "line_number": 35
+ }
+ ],
+ "kube/services/jobs/fence-cleanup-expired-ga4gh-info-cronjob.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/fence-cleanup-expired-ga4gh-info-cronjob.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
+ "is_verified": false,
+ "line_number": 43
+ }
+ ],
+ "kube/services/jobs/fence-cleanup-expired-ga4gh-info-job.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/fence-cleanup-expired-ga4gh-info-job.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
+ "is_verified": false,
+ "line_number": 36
+ }
+ ],
+ "kube/services/jobs/fence-db-migrate-job.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/fence-db-migrate-job.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
+ "is_verified": false,
+ "line_number": 36
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/fence-db-migrate-job.yaml",
+ "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d",
+ "is_verified": false,
+ "line_number": 39
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/fence-db-migrate-job.yaml",
+ "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb",
+ "is_verified": false,
+ "line_number": 42
+ }
+ ],
+ "kube/services/jobs/fence-delete-expired-clients-job.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/fence-delete-expired-clients-job.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
+ "is_verified": false,
+ "line_number": 38
+ }
+ ],
+ "kube/services/jobs/fence-visa-update-cronjob.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/fence-visa-update-cronjob.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
+ "is_verified": false,
+ "line_number": 42
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/fence-visa-update-cronjob.yaml",
+ "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d",
+ "is_verified": false,
+ "line_number": 45
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/fence-visa-update-cronjob.yaml",
+ "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb",
+ "is_verified": false,
+ "line_number": 48
+ }
+ ],
+ "kube/services/jobs/fence-visa-update-job.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/fence-visa-update-job.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
+ "is_verified": false,
+ "line_number": 36
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/fence-visa-update-job.yaml",
+ "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d",
+ "is_verified": false,
+ "line_number": 39
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/fence-visa-update-job.yaml",
+ "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb",
+ "is_verified": false,
+ "line_number": 42
+ }
+ ],
+ "kube/services/jobs/fencedb-create-job.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/fencedb-create-job.yaml",
+ "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634",
+ "is_verified": false,
+ "line_number": 33
+ }
+ ],
+ "kube/services/jobs/gdcdb-create-job.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/gdcdb-create-job.yaml",
+ "hashed_secret": "79496491225eda4a7be9fcddee2825c85b1535cc",
+ "is_verified": false,
+ "line_number": 33
+ }
+ ],
+ "kube/services/jobs/gen3qa-check-bucket-access-job.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/gen3qa-check-bucket-access-job.yaml",
+ "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55",
+ "is_verified": false,
+ "line_number": 177
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/gen3qa-check-bucket-access-job.yaml",
+ "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634",
+ "is_verified": false,
+ "line_number": 180
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/gen3qa-check-bucket-access-job.yaml",
+ "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb",
+ "is_verified": false,
+ "line_number": 186
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/gen3qa-check-bucket-access-job.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
+ "is_verified": false,
+ "line_number": 190
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/gen3qa-check-bucket-access-job.yaml",
+ "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457",
+ "is_verified": false,
+ "line_number": 193
+ }
+ ],
+ "kube/services/jobs/gentestdata-job.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/gentestdata-job.yaml",
+ "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55",
"is_verified": false,
- "line_number": 391,
- "type": "Base64 High Entropy String"
+ "line_number": 67
},
{
- "hashed_secret": "658060a680d415ce6690ad2c3b622ddb33ddd50a",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/gentestdata-job.yaml",
+ "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634",
"is_verified": false,
- "line_number": 392,
- "type": "Base64 High Entropy String"
+ "line_number": 70
},
{
- "hashed_secret": "80915b0bd9daa5e1f95cad573892980b1b5a2294",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/gentestdata-job.yaml",
+ "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb",
"is_verified": false,
- "line_number": 393,
- "type": "Base64 High Entropy String"
+ "line_number": 76
},
{
- "hashed_secret": "cc55977b293d8cdca8a2c19dfea6874e70057c41",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/gentestdata-job.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
"is_verified": false,
- "line_number": 394,
- "type": "Base64 High Entropy String"
+ "line_number": 80
},
{
- "hashed_secret": "e400ed02add75dd5f3a8c212857acf12027437d1",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/gentestdata-job.yaml",
+ "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457",
"is_verified": false,
- "line_number": 395,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 83
+ }
+ ],
+ "kube/services/jobs/google-bucket-manifest-job.yaml": [
{
- "hashed_secret": "2e819c8baa3b0508a32b77de258655b3f3a6f7cb",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-bucket-manifest-job.yaml",
+ "hashed_secret": "5ca8fff7767e5dd6ebed80e2c8eab66d6f3bf5eb",
"is_verified": false,
- "line_number": 396,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 31
+ }
+ ],
+ "kube/services/jobs/google-bucket-replicate-job.yaml": [
{
- "hashed_secret": "546ed926d58ea5492ab6adb8be94a67aa44ac433",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-bucket-replicate-job.yaml",
+ "hashed_secret": "b6f0ec0b08da77656ced48427841e28d7a8a81d6",
"is_verified": false,
- "line_number": 397,
- "type": "Base64 High Entropy String"
+ "line_number": 35
},
{
- "hashed_secret": "f056f2deceed268e7af6dbdaf2577079c76e006a",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-bucket-replicate-job.yaml",
+ "hashed_secret": "abe72fcb190ed9c73eb20e198c73a97605b95063",
"is_verified": false,
- "line_number": 398,
- "type": "Base64 High Entropy String"
+ "line_number": 38
},
{
- "hashed_secret": "d75efee28f4798c3a9c6f44b78a8500513ef28b2",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-bucket-replicate-job.yaml",
+ "hashed_secret": "ca3cdac59f2bfa45cb014190e4509bf6becf28fb",
"is_verified": false,
- "line_number": 399,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 41
+ }
+ ],
+ "kube/services/jobs/google-create-bucket-job.yaml": [
{
- "hashed_secret": "7803ae08cdc22a5e0b025eff3c9ef0628eedc165",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-create-bucket-job.yaml",
+ "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55",
"is_verified": false,
- "line_number": 419,
- "type": "Base64 High Entropy String"
+ "line_number": 78
},
{
- "hashed_secret": "b8b61e87f5b58b0eeb597b2122ea0cea2ccab3d9",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-create-bucket-job.yaml",
+ "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb",
"is_verified": false,
- "line_number": 420,
- "type": "Base64 High Entropy String"
+ "line_number": 81
},
{
- "hashed_secret": "787745fc904c3bd7eddc3d1aab683a376c13890f",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-create-bucket-job.yaml",
+ "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634",
"is_verified": false,
- "line_number": 423,
- "type": "Base64 High Entropy String"
+ "line_number": 84
},
{
- "hashed_secret": "81361d672f238f505a6246ef9b655ee2f48d67e7",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-create-bucket-job.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
"is_verified": false,
- "line_number": 424,
- "type": "Base64 High Entropy String"
+ "line_number": 91
},
{
- "hashed_secret": "7c98bff76ac3f273d15ed9bc3dd5294d323ab577",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-create-bucket-job.yaml",
+ "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d",
"is_verified": false,
- "line_number": 425,
- "type": "Base64 High Entropy String"
+ "line_number": 94
},
{
- "hashed_secret": "46038fc88daceed8dd46817ca45c72ae0270fdd4",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-create-bucket-job.yaml",
+ "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb",
"is_verified": false,
- "line_number": 426,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 97
+ }
+ ],
+ "kube/services/jobs/google-delete-expired-access-cronjob.yaml": [
{
- "hashed_secret": "acad0c57b4f5cbed1b4863ed06d02784180a9f92",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-delete-expired-access-cronjob.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
"is_verified": false,
- "line_number": 427,
- "type": "Base64 High Entropy String"
+ "line_number": 43
},
{
- "hashed_secret": "1b57f49a6ee337c16ecd6aabfc0dff3b3821cd09",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-delete-expired-access-cronjob.yaml",
+ "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d",
"is_verified": false,
- "line_number": 428,
- "type": "Base64 High Entropy String"
+ "line_number": 46
},
{
- "hashed_secret": "5b688158be36e8b3f265a462ed599dcf69290084",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-delete-expired-access-cronjob.yaml",
+ "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb",
"is_verified": false,
- "line_number": 429,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 49
+ }
+ ],
+ "kube/services/jobs/google-delete-expired-access-job.yaml": [
{
- "hashed_secret": "965996e12c8b50b3c325d96003e8984a4ece658a",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-delete-expired-access-job.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
"is_verified": false,
- "line_number": 430,
- "type": "Base64 High Entropy String"
+ "line_number": 36
},
{
- "hashed_secret": "584f0c58e764e948af1a35c9e60447aa0f84c6f5",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-delete-expired-access-job.yaml",
+ "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d",
"is_verified": false,
- "line_number": 431,
- "type": "Base64 High Entropy String"
+ "line_number": 39
},
{
- "hashed_secret": "bcaf897786d060a675ee9d654a84ae8baf96e9d0",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-delete-expired-access-job.yaml",
+ "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb",
"is_verified": false,
- "line_number": 432,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 42
+ }
+ ],
+ "kube/services/jobs/google-delete-expired-service-account-cronjob.yaml": [
{
- "hashed_secret": "0c09277fa183e06d32065f9386a3b4190b445df3",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-delete-expired-service-account-cronjob.yaml",
+ "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634",
"is_verified": false,
- "line_number": 433,
- "type": "Base64 High Entropy String"
+ "line_number": 48
},
{
- "hashed_secret": "5a51be06b305d6664e4afd25f21869b0f8b5039b",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-delete-expired-service-account-cronjob.yaml",
+ "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55",
"is_verified": false,
- "line_number": 434,
- "type": "Base64 High Entropy String"
+ "line_number": 51
},
{
- "hashed_secret": "b38404f8853d734e3d03577b2c1084b4540c8708",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-delete-expired-service-account-cronjob.yaml",
+ "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb",
"is_verified": false,
- "line_number": 435,
- "type": "Base64 High Entropy String"
+ "line_number": 57
},
{
- "hashed_secret": "126ccc602cffcb8292beb57137f7f6719e317b72",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-delete-expired-service-account-cronjob.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
"is_verified": false,
- "line_number": 436,
- "type": "Base64 High Entropy String"
+ "line_number": 61
},
{
- "hashed_secret": "6681c1d7e1d327642a32cb8864ad51e4b8f981e5",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-delete-expired-service-account-cronjob.yaml",
+ "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d",
"is_verified": false,
- "line_number": 437,
- "type": "Base64 High Entropy String"
+ "line_number": 64
},
{
- "hashed_secret": "7f7b1f316ece195e5f584fe2faf6f9edc6942c6f",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-delete-expired-service-account-cronjob.yaml",
+ "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb",
"is_verified": false,
- "line_number": 439,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 67
+ }
+ ],
+ "kube/services/jobs/google-delete-expired-service-account-job.yaml": [
{
- "hashed_secret": "bb908c7bc655057f2edc42815c5dff82e9dea529",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-delete-expired-service-account-job.yaml",
+ "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634",
"is_verified": false,
- "line_number": 440,
- "type": "Base64 High Entropy String"
+ "line_number": 40
},
{
- "hashed_secret": "bc2a0d18e3dd142df7b34e95342d47bf8aadabcb",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-delete-expired-service-account-job.yaml",
+ "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55",
"is_verified": false,
- "line_number": 441,
- "type": "Base64 High Entropy String"
+ "line_number": 43
},
{
- "hashed_secret": "d60f0bcea109bb6edb6e45fd387f5f2c86e49e1a",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-delete-expired-service-account-job.yaml",
+ "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb",
"is_verified": false,
- "line_number": 442,
- "type": "Base64 High Entropy String"
+ "line_number": 49
},
{
- "hashed_secret": "e549dd40a741557cc1c4e377df0a141354e22688",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-delete-expired-service-account-job.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
"is_verified": false,
- "line_number": 443,
- "type": "Base64 High Entropy String"
+ "line_number": 53
},
{
- "hashed_secret": "2dd2486dae84cad50387c20bf687b6fbc6162b58",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-delete-expired-service-account-job.yaml",
+ "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d",
"is_verified": false,
- "line_number": 444,
- "type": "Base64 High Entropy String"
+ "line_number": 56
},
{
- "hashed_secret": "71622010fc7eb09d9273f59c548bde6a5da5dc0e",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-delete-expired-service-account-job.yaml",
+ "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb",
"is_verified": false,
- "line_number": 445,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 59
+ }
+ ],
+ "kube/services/jobs/google-init-proxy-groups-cronjob.yaml": [
{
- "hashed_secret": "6f0115cf53bd49ec990c562ac6cbfc452c83cd46",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-init-proxy-groups-cronjob.yaml",
+ "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55",
"is_verified": false,
- "line_number": 446,
- "type": "Base64 High Entropy String"
+ "line_number": 48
},
{
- "hashed_secret": "70dddd534b2f9bb70871fefe0845b79c3b69363f",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-init-proxy-groups-cronjob.yaml",
+ "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb",
"is_verified": false,
- "line_number": 448,
- "type": "Base64 High Entropy String"
+ "line_number": 51
},
{
- "hashed_secret": "acf3536b0416aa99608b0be17e87655370ece829",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-init-proxy-groups-cronjob.yaml",
+ "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634",
"is_verified": false,
- "line_number": 449,
- "type": "Base64 High Entropy String"
+ "line_number": 54
},
{
- "hashed_secret": "1d13ee35c7279c1fae1c6474ed47611994273e41",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-init-proxy-groups-cronjob.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
"is_verified": false,
- "line_number": 450,
- "type": "Base64 High Entropy String"
+ "line_number": 61
},
{
- "hashed_secret": "d38cf89b25bd7378cdb4e00b4b59293001dd500b",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-init-proxy-groups-cronjob.yaml",
+ "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d",
"is_verified": false,
- "line_number": 451,
- "type": "Base64 High Entropy String"
+ "line_number": 64
},
{
- "hashed_secret": "1648f34ce2f1b563a8ed1c6d5d55b5e76a395903",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-init-proxy-groups-cronjob.yaml",
+ "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb",
"is_verified": false,
- "line_number": 452,
- "type": "Base64 High Entropy String"
+ "line_number": 67
},
{
- "hashed_secret": "9bf63f6f49fb01ff80959bc5a60c8688df92cc02",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-init-proxy-groups-cronjob.yaml",
+ "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457",
"is_verified": false,
- "line_number": 453,
- "type": "Base64 High Entropy String"
+ "line_number": 70
}
],
- "kube/services/jobs/indexd-authz-job.yaml": [
+ "kube/services/jobs/google-init-proxy-groups-job.yaml": [
{
- "hashed_secret": "bf21a9e8fbc5a3846fb05b4fa0859e0917b2202f",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-init-proxy-groups-job.yaml",
+ "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55",
"is_verified": false,
- "line_number": 70,
- "type": "Basic Auth Credentials"
- }
- ],
- "kube/services/monitoring/grafana-values.yaml": [
+ "line_number": 40
+ },
{
- "hashed_secret": "2ae868079d293e0a185c671c7bcdac51df36e385",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-init-proxy-groups-job.yaml",
+ "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb",
"is_verified": false,
- "line_number": 162,
- "type": "Secret Keyword"
+ "line_number": 43
},
{
- "hashed_secret": "7a64ff8446b06d38dc271019994f13823a2cbcf4",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-init-proxy-groups-job.yaml",
+ "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634",
"is_verified": false,
- "line_number": 166,
- "type": "Secret Keyword"
- }
- ],
- "kube/services/ohdsi-atlas/README.md": [
+ "line_number": 46
+ },
{
- "hashed_secret": "6e71f9f2b1e96de5a712f899ed26477ebc260a73",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-init-proxy-groups-job.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
"is_verified": false,
- "line_number": 105,
- "type": "Secret Keyword"
+ "line_number": 53
},
{
- "hashed_secret": "317b889ca9fa8789dc1b85714568b1bdf2c7baf3",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-init-proxy-groups-job.yaml",
+ "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d",
"is_verified": false,
- "line_number": 108,
- "type": "Secret Keyword"
- }
- ],
- "kube/services/revproxy/helpers.js": [
+ "line_number": 56
+ },
{
- "hashed_secret": "1d278d3c888d1a2fa7eed622bfc02927ce4049af",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-init-proxy-groups-job.yaml",
+ "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb",
"is_verified": false,
- "line_number": 10,
- "type": "Base64 High Entropy String"
- }
- ],
- "kube/services/revproxy/helpersTest.js": [
+ "line_number": 59
+ },
{
- "hashed_secret": "e029d4904cc728879d70030572bf37d4510367cb",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-init-proxy-groups-job.yaml",
+ "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457",
"is_verified": false,
- "line_number": 22,
- "type": "JSON Web Token"
+ "line_number": 62
}
],
- "kube/services/superset/superset-deploy.yaml": [
+ "kube/services/jobs/google-manage-account-access-cronjob.yaml": [
{
- "hashed_secret": "96e4aceb7cf284be363aa248a32a7cc89785a9f7",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-manage-account-access-cronjob.yaml",
+ "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55",
"is_verified": false,
- "line_number": 38,
- "type": "Secret Keyword"
- }
- ],
- "kube/services/superset/superset-redis.yaml": [
+ "line_number": 48
+ },
{
- "hashed_secret": "4af3596275edcb7cd5cc6c3c38bc10479902a08f",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-manage-account-access-cronjob.yaml",
+ "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb",
"is_verified": false,
- "line_number": 165,
- "type": "Secret Keyword"
+ "line_number": 51
},
{
- "hashed_secret": "244f421f896bdcdd2784dccf4eaf7c8dfd5189b5",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-manage-account-access-cronjob.yaml",
+ "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634",
"is_verified": false,
- "line_number": 260,
- "type": "Secret Keyword"
- }
- ],
- "kube/services/superset/superset/superset-deploy.yaml": [
+ "line_number": 54
+ },
{
- "hashed_secret": "96e4aceb7cf284be363aa248a32a7cc89785a9f7",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-manage-account-access-cronjob.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
"is_verified": false,
- "line_number": 38,
- "type": "Secret Keyword"
- }
- ],
- "kube/services/superset/superset/superset-redis.yaml": [
+ "line_number": 61
+ },
{
- "hashed_secret": "4af3596275edcb7cd5cc6c3c38bc10479902a08f",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-manage-account-access-cronjob.yaml",
+ "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d",
"is_verified": false,
- "line_number": 169,
- "type": "Secret Keyword"
+ "line_number": 64
},
{
- "hashed_secret": "244f421f896bdcdd2784dccf4eaf7c8dfd5189b5",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-manage-account-access-cronjob.yaml",
+ "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb",
"is_verified": false,
- "line_number": 266,
- "type": "Secret Keyword"
+ "line_number": 67
}
],
- "kube/services/superset/values.yaml": [
+ "kube/services/jobs/google-manage-account-access-job.yaml": [
{
- "hashed_secret": "6f803b24314c39062efe38d0c1da8c472f47eab3",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-manage-account-access-job.yaml",
+ "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55",
"is_verified": false,
- "line_number": 54,
- "type": "Secret Keyword"
+ "line_number": 40
},
{
- "hashed_secret": "6eae3a5b062c6d0d79f070c26e6d62486b40cb46",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-manage-account-access-job.yaml",
+ "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb",
"is_verified": false,
- "line_number": 86,
- "type": "Secret Keyword"
+ "line_number": 43
},
{
- "hashed_secret": "3eb416223e9e69e6bb8ee19793911ad1ad2027d8",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-manage-account-access-job.yaml",
+ "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634",
"is_verified": false,
- "line_number": 212,
- "type": "Secret Keyword"
+ "line_number": 46
},
{
- "hashed_secret": "ff55435345834a3fe224936776c2aa15f6ed5358",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-manage-account-access-job.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
"is_verified": false,
- "line_number": 396,
- "type": "Secret Keyword"
+ "line_number": 53
},
{
- "hashed_secret": "98a84a63e5633d17e3b27b69695f87aa7189e9dc",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-manage-account-access-job.yaml",
+ "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d",
"is_verified": false,
- "line_number": 503,
- "type": "Secret Keyword"
- }
- ],
- "package-lock.json": [
+ "line_number": 56
+ },
{
- "hashed_secret": "c95b6bc99445e7ed9177040f5ef94d0cdb38fb21",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-manage-account-access-job.yaml",
+ "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb",
"is_verified": false,
- "line_number": 10,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 59
+ }
+ ],
+ "kube/services/jobs/google-manage-keys-cronjob.yaml": [
{
- "hashed_secret": "a896da46c897d3a0d007843006621f78dbcabf51",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-manage-keys-cronjob.yaml",
+ "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55",
"is_verified": false,
- "line_number": 19,
- "type": "Base64 High Entropy String"
+ "line_number": 48
},
{
- "hashed_secret": "84b662fc9a2a275f90d0afafe6ce08a4d0928ac8",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-manage-keys-cronjob.yaml",
+ "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb",
"is_verified": false,
- "line_number": 28,
- "type": "Base64 High Entropy String"
+ "line_number": 51
},
{
- "hashed_secret": "6ebe9724873357aaea25e329efb726fa61b843e7",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-manage-keys-cronjob.yaml",
+ "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634",
"is_verified": false,
- "line_number": 39,
- "type": "Base64 High Entropy String"
+ "line_number": 54
},
{
- "hashed_secret": "f1dbba169db046906924ccd784068a2306096634",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-manage-keys-cronjob.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
"is_verified": false,
- "line_number": 44,
- "type": "Base64 High Entropy String"
+ "line_number": 61
},
{
- "hashed_secret": "2c7bd6cdc39b5b8a0f32aa11988a0ec769526cdb",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-manage-keys-cronjob.yaml",
+ "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d",
"is_verified": false,
- "line_number": 52,
- "type": "Base64 High Entropy String"
+ "line_number": 64
},
{
- "hashed_secret": "1addd61f68d977408128e530959437821a6d8b66",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-manage-keys-cronjob.yaml",
+ "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb",
"is_verified": false,
- "line_number": 57,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 67
+ }
+ ],
+ "kube/services/jobs/google-manage-keys-job.yaml": [
{
- "hashed_secret": "9787d966f19a0d8d0021b31d34cfdfcebdb9c28a",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-manage-keys-job.yaml",
+ "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55",
"is_verified": false,
- "line_number": 65,
- "type": "Base64 High Entropy String"
+ "line_number": 40
},
{
- "hashed_secret": "76693e518c3c8123e9a197821b506292322a0a95",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-manage-keys-job.yaml",
+ "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb",
"is_verified": false,
- "line_number": 70,
- "type": "Base64 High Entropy String"
+ "line_number": 43
},
{
- "hashed_secret": "fa83dcbf0f435ee38066d19a2a43815510f96bc4",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-manage-keys-job.yaml",
+ "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634",
"is_verified": false,
- "line_number": 86,
- "type": "Base64 High Entropy String"
+ "line_number": 46
},
{
- "hashed_secret": "017a7eab3d63331ecfe768927c8907a5a31888e5",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-manage-keys-job.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
"is_verified": false,
- "line_number": 91,
- "type": "Base64 High Entropy String"
+ "line_number": 53
},
{
- "hashed_secret": "92b56edda4f2906f548fe77c015490e6ba2ee4c3",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-manage-keys-job.yaml",
+ "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d",
"is_verified": false,
- "line_number": 96,
- "type": "Base64 High Entropy String"
+ "line_number": 56
},
{
- "hashed_secret": "936b0959aa13f1decc76be1d80acaac0860847b7",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-manage-keys-job.yaml",
+ "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb",
"is_verified": false,
- "line_number": 101,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 59
+ }
+ ],
+ "kube/services/jobs/google-verify-bucket-access-group-cronjob.yaml": [
{
- "hashed_secret": "4bad86c43b7cd06efc130272d8e4de2b32636371",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-verify-bucket-access-group-cronjob.yaml",
+ "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634",
"is_verified": false,
- "line_number": 109,
- "type": "Base64 High Entropy String"
+ "line_number": 48
},
{
- "hashed_secret": "d11716ecfa623706b733654d78f4e7af3c117efa",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-verify-bucket-access-group-cronjob.yaml",
+ "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55",
"is_verified": false,
- "line_number": 143,
- "type": "Base64 High Entropy String"
+ "line_number": 51
},
{
- "hashed_secret": "0cc93dfdf4ae08bc374b99af985b25d2427f71d8",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-verify-bucket-access-group-cronjob.yaml",
+ "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb",
"is_verified": false,
- "line_number": 148,
- "type": "Base64 High Entropy String"
+ "line_number": 54
},
{
- "hashed_secret": "80f8d53f3fedde239f695d6a4c44c78b4aff0a44",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-verify-bucket-access-group-cronjob.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
"is_verified": false,
- "line_number": 153,
- "type": "Base64 High Entropy String"
+ "line_number": 61
},
{
- "hashed_secret": "83307cb75a4a44ba528f4a0aefcec2a8018dc6d8",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-verify-bucket-access-group-cronjob.yaml",
+ "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d",
"is_verified": false,
- "line_number": 158,
- "type": "Base64 High Entropy String"
+ "line_number": 64
},
{
- "hashed_secret": "c96d81662cc7919208154e7152fa0033391b7bcd",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-verify-bucket-access-group-cronjob.yaml",
+ "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb",
"is_verified": false,
- "line_number": 166,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 67
+ }
+ ],
+ "kube/services/jobs/google-verify-bucket-access-group-job.yaml": [
{
- "hashed_secret": "7156492f40fb2479a45780b3d2959c29b27b6374",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-verify-bucket-access-group-job.yaml",
+ "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634",
"is_verified": false,
- "line_number": 181,
- "type": "Base64 High Entropy String"
+ "line_number": 40
},
{
- "hashed_secret": "885304335818f51938422166d361cddacfd626d0",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-verify-bucket-access-group-job.yaml",
+ "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55",
"is_verified": false,
- "line_number": 186,
- "type": "Base64 High Entropy String"
+ "line_number": 43
},
{
- "hashed_secret": "915ca894a8ec19ffcd55555e6c8daac1fe882751",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-verify-bucket-access-group-job.yaml",
+ "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb",
"is_verified": false,
- "line_number": 191,
- "type": "Base64 High Entropy String"
+ "line_number": 46
},
{
- "hashed_secret": "7ea379a1bf787a21401c8c39f285e4e84b478d72",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-verify-bucket-access-group-job.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
"is_verified": false,
- "line_number": 201,
- "type": "Base64 High Entropy String"
+ "line_number": 53
},
{
- "hashed_secret": "8e948a3b773d1a2e4b6f4220216efa734315246d",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-verify-bucket-access-group-job.yaml",
+ "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d",
"is_verified": false,
- "line_number": 209,
- "type": "Base64 High Entropy String"
+ "line_number": 56
},
{
- "hashed_secret": "1a321d0b0d9b6d75888ce7ae121ac222cec1eddd",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-verify-bucket-access-group-job.yaml",
+ "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb",
"is_verified": false,
- "line_number": 217,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 59
+ }
+ ],
+ "kube/services/jobs/graph-create-job.yaml": [
{
- "hashed_secret": "1a6bfe25744ad6c6ce27c3a52dbd98c15be12a5c",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/graph-create-job.yaml",
+ "hashed_secret": "79496491225eda4a7be9fcddee2825c85b1535cc",
"is_verified": false,
- "line_number": 222,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 33
+ }
+ ],
+ "kube/services/jobs/indexd-authz-job.yaml": [
{
- "hashed_secret": "04450eaacfa844f84926d04d6a07534cde99b28e",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/indexd-authz-job.yaml",
+ "hashed_secret": "0b701c1fabb6ba47a7d47d455e3696d207014bd3",
"is_verified": false,
- "line_number": 227,
- "type": "Base64 High Entropy String"
+ "line_number": 32
},
{
- "hashed_secret": "b4c295435d09bbdfb91ced9040379166d67ccbd2",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/indexd-authz-job.yaml",
+ "hashed_secret": "79496491225eda4a7be9fcddee2825c85b1535cc",
"is_verified": false,
- "line_number": 232,
- "type": "Base64 High Entropy String"
+ "line_number": 35
},
{
- "hashed_secret": "bb2bf296d6e086b471d45a26af9fd57f55289a75",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/indexd-authz-job.yaml",
+ "hashed_secret": "aee98a99696237d70b6854ee4c2d9e42bc696039",
"is_verified": false,
- "line_number": 237,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 38
+ }
+ ],
+ "kube/services/jobs/indexd-userdb-job.yaml": [
{
- "hashed_secret": "9579b6a23d94d56f2f163233b716d8752e6b3bde",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/indexd-userdb-job.yaml",
+ "hashed_secret": "0b701c1fabb6ba47a7d47d455e3696d207014bd3",
"is_verified": false,
- "line_number": 256,
- "type": "Base64 High Entropy String"
+ "line_number": 40
},
{
- "hashed_secret": "796925256bc0f4dc43cdfab7fbff852eace18f42",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/indexd-userdb-job.yaml",
+ "hashed_secret": "79496491225eda4a7be9fcddee2825c85b1535cc",
"is_verified": false,
- "line_number": 287,
- "type": "Base64 High Entropy String"
+ "line_number": 43
},
{
- "hashed_secret": "7e280af4ec2d573144d98e89ed2e1dfd817ca48f",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/indexd-userdb-job.yaml",
+ "hashed_secret": "aee98a99696237d70b6854ee4c2d9e42bc696039",
"is_verified": false,
- "line_number": 295,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 46
+ }
+ ],
+ "kube/services/jobs/metadata-aggregate-sync-job.yaml": [
{
- "hashed_secret": "941b3e7836a6f26d32311893ac5d9ad0a52c45ca",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/metadata-aggregate-sync-job.yaml",
+ "hashed_secret": "e14f65c8ca7f3b27a0f0f5463569954841e162c9",
"is_verified": false,
- "line_number": 300,
- "type": "Base64 High Entropy String"
+ "line_number": 31
},
{
- "hashed_secret": "34743e1f7d9541c4a726b998f20baf828c694213",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/metadata-aggregate-sync-job.yaml",
+ "hashed_secret": "c27babf45eb0ed87329e69c7d47dba611e859c5d",
"is_verified": false,
- "line_number": 305,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 34
+ }
+ ],
+ "kube/services/jobs/metadata-delete-expired-objects-job.yaml": [
{
- "hashed_secret": "c4fea87bd49c4427d7215d57ada9ff3177e0c471",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/metadata-delete-expired-objects-job.yaml",
+ "hashed_secret": "0cc8bac3fabe63722716d1e6fe04a8dded1e3ad0",
"is_verified": false,
- "line_number": 310,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 24
+ }
+ ],
+ "kube/services/jobs/remove-objects-from-clouds-job.yaml": [
{
- "hashed_secret": "85324324e21d0dfbfb5248ac92fa0f289d2e25f8",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/remove-objects-from-clouds-job.yaml",
+ "hashed_secret": "deb02468778f4041fb189654698ac948e436732d",
"is_verified": false,
- "line_number": 315,
- "type": "Base64 High Entropy String"
+ "line_number": 34
},
{
- "hashed_secret": "19eea0e64f6a3311b04e472035df10c23f23dd0a",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/remove-objects-from-clouds-job.yaml",
+ "hashed_secret": "b6f0ec0b08da77656ced48427841e28d7a8a81d6",
"is_verified": false,
- "line_number": 352,
- "type": "Base64 High Entropy String"
+ "line_number": 37
},
{
- "hashed_secret": "acce4ef8d841ffa646256da3af7b79ad5cb78158",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/remove-objects-from-clouds-job.yaml",
+ "hashed_secret": "ca3cdac59f2bfa45cb014190e4509bf6becf28fb",
"is_verified": false,
- "line_number": 364,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 43
+ }
+ ],
+ "kube/services/jobs/replicate-validation-job.yaml": [
{
- "hashed_secret": "22e7ae9b65ade417baac61e6f0d84a54783ba759",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/replicate-validation-job.yaml",
+ "hashed_secret": "deb02468778f4041fb189654698ac948e436732d",
"is_verified": false,
- "line_number": 369,
- "type": "Base64 High Entropy String"
+ "line_number": 34
},
{
- "hashed_secret": "8e71b7828c7c554f05dbbabddd63301b5fc56771",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/replicate-validation-job.yaml",
+ "hashed_secret": "b6f0ec0b08da77656ced48427841e28d7a8a81d6",
"is_verified": false,
- "line_number": 374,
- "type": "Base64 High Entropy String"
+ "line_number": 37
},
{
- "hashed_secret": "fea0d9c5b0c53c41e6a0a961a49cccc170847120",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/replicate-validation-job.yaml",
+ "hashed_secret": "abe72fcb190ed9c73eb20e198c73a97605b95063",
"is_verified": false,
- "line_number": 379,
- "type": "Base64 High Entropy String"
+ "line_number": 40
},
{
- "hashed_secret": "ebe2160ede628e0faeac9fe70c215cd38d28d8f6",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/replicate-validation-job.yaml",
+ "hashed_secret": "ca3cdac59f2bfa45cb014190e4509bf6becf28fb",
"is_verified": false,
- "line_number": 384,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 43
+ }
+ ],
+ "kube/services/jobs/s3sync-cronjob.yaml": [
{
- "hashed_secret": "9cb2b0347722893cde39bbe83f9df7c3c6e1b7c3",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/s3sync-cronjob.yaml",
+ "hashed_secret": "27f6dfe15698a3bfaa183c84701cfb2bf4115415",
"is_verified": false,
- "line_number": 398,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 44
+ }
+ ],
+ "kube/services/jobs/usersync-job.yaml": [
{
- "hashed_secret": "344e37e02a35dd31cc7dc945b7fe7b2da88344c0",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/usersync-job.yaml",
+ "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55",
"is_verified": false,
- "line_number": 403,
- "type": "Base64 High Entropy String"
+ "line_number": 64
},
{
- "hashed_secret": "31a41817127c8d2b7b304c326b05d7319934e7a6",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/usersync-job.yaml",
+ "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb",
"is_verified": false,
- "line_number": 413,
- "type": "Base64 High Entropy String"
+ "line_number": 67
},
{
- "hashed_secret": "150852e9f1e877547306d59618a136fb535b40e3",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/usersync-job.yaml",
+ "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634",
"is_verified": false,
- "line_number": 418,
- "type": "Base64 High Entropy String"
+ "line_number": 70
},
{
- "hashed_secret": "277e32c5ba00ef90c6f76c7004fde2ecac6d2e18",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/usersync-job.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
"is_verified": false,
- "line_number": 423,
- "type": "Base64 High Entropy String"
+ "line_number": 77
},
{
- "hashed_secret": "b95e69c7f4328ea641952f875c3b079a1585c9d1",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/usersync-job.yaml",
+ "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d",
"is_verified": false,
- "line_number": 431,
- "type": "Base64 High Entropy String"
+ "line_number": 80
},
{
- "hashed_secret": "6b30fe731c8444c0263b57aacbdaedb771ec01a5",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/usersync-job.yaml",
+ "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb",
"is_verified": false,
- "line_number": 436,
- "type": "Base64 High Entropy String"
+ "line_number": 83
},
{
- "hashed_secret": "98eafa06e0c7e089c19e79dedf5989c3eb2f0568",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/usersync-job.yaml",
+ "hashed_secret": "ea73fcfdaa415890d5fde24d3b2245671be32f73",
"is_verified": false,
- "line_number": 445,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 86
+ }
+ ],
+ "kube/services/jobs/useryaml-job.yaml": [
{
- "hashed_secret": "bf47364c2d4ad0308ef016fe4a89f6c7dc21ef86",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/useryaml-job.yaml",
+ "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55",
"is_verified": false,
- "line_number": 464,
- "type": "Base64 High Entropy String"
+ "line_number": 40
},
{
- "hashed_secret": "3e6c18abd5b90c63da0bd8b4c0d3a142e3d5a83d",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/useryaml-job.yaml",
+ "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb",
"is_verified": false,
- "line_number": 474,
- "type": "Base64 High Entropy String"
+ "line_number": 43
},
{
- "hashed_secret": "209bf9cfe9000c6851cd4f94165d30ee1cd3dca1",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/useryaml-job.yaml",
+ "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634",
"is_verified": false,
- "line_number": 482,
- "type": "Base64 High Entropy String"
+ "line_number": 46
},
{
- "hashed_secret": "cf09cb791688fe019284bfdc362abc41918645a5",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/useryaml-job.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
"is_verified": false,
- "line_number": 487,
- "type": "Base64 High Entropy String"
+ "line_number": 53
},
{
- "hashed_secret": "6c1392daf02b9ba2a21c49c82508048525d5bc4b",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/useryaml-job.yaml",
+ "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d",
"is_verified": false,
- "line_number": 492,
- "type": "Base64 High Entropy String"
+ "line_number": 56
},
{
- "hashed_secret": "b4e2bf4f3a071b223da2f270d5a2348d65105d3e",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/useryaml-job.yaml",
+ "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb",
"is_verified": false,
- "line_number": 497,
- "type": "Base64 High Entropy String"
+ "line_number": 59
},
{
- "hashed_secret": "98d583792218c3c06ecbcac66e5bedcdaabd63e7",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/useryaml-job.yaml",
+ "hashed_secret": "ea73fcfdaa415890d5fde24d3b2245671be32f73",
"is_verified": false,
- "line_number": 507,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 65
+ }
+ ],
+ "kube/services/kayako-wrapper/kayako-wrapper-deploy.yaml": [
{
- "hashed_secret": "575c9b4e0765ae6ab9a4f38eb1186ea361691f73",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/kayako-wrapper/kayako-wrapper-deploy.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
"is_verified": false,
- "line_number": 514,
- "type": "Base64 High Entropy String"
+ "line_number": 56
},
{
- "hashed_secret": "16225dde2ec301d038a0bdbda68de4a174fbfdd0",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/kayako-wrapper/kayako-wrapper-deploy.yaml",
+ "hashed_secret": "fb7ea689a364feb7aafbf8d553eb77073fa7ba11",
"is_verified": false,
- "line_number": 519,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 59
+ }
+ ],
+ "kube/services/kubecost-standalone/thanos-deploy.yaml": [
{
- "hashed_secret": "80d73b6f7e87f07e3ae70ef1e692aa9569574551",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/kubecost-standalone/thanos-deploy.yaml",
+ "hashed_secret": "064376809efc3acda5bd341aca977e149b989696",
"is_verified": false,
- "line_number": 524,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 127
+ }
+ ],
+ "kube/services/kubecost-standalone/values.yaml": [
{
- "hashed_secret": "38952752ebde485c02a80bff1d81ebe95664bcca",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/kubecost-standalone/values.yaml",
+ "hashed_secret": "ec9786daee68e3541963a51299160859fe4db663",
"is_verified": false,
- "line_number": 529,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 30
+ }
+ ],
+ "kube/services/manifestservice/manifestservice-deploy.yaml": [
{
- "hashed_secret": "150b60d278251f2470dd690016afe038bc1bb7f1",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/manifestservice/manifestservice-deploy.yaml",
+ "hashed_secret": "3da2c49c267b6c58401bbf05e379b38d20434f78",
"is_verified": false,
- "line_number": 534,
- "type": "Base64 High Entropy String"
+ "line_number": 61
},
{
- "hashed_secret": "535582d92da3a4158e592ec29868bfd8467b8bce",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/manifestservice/manifestservice-deploy.yaml",
+ "hashed_secret": "469e0c2b1a67aa94955bae023ddc727be31581a7",
"is_verified": false,
- "line_number": 539,
- "type": "Base64 High Entropy String"
+ "line_number": 64
},
{
- "hashed_secret": "23b096d9b48ed5d9a778d3db5807c5c7a2357c93",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/manifestservice/manifestservice-deploy.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
"is_verified": false,
- "line_number": 544,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 67
+ }
+ ],
+ "kube/services/metadata/metadata-deploy.yaml": [
{
- "hashed_secret": "127f92724797904fb4e6de2dfff2c71c07739612",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/metadata/metadata-deploy.yaml",
+ "hashed_secret": "e14f65c8ca7f3b27a0f0f5463569954841e162c9",
"is_verified": false,
- "line_number": 549,
- "type": "Base64 High Entropy String"
+ "line_number": 61
},
{
- "hashed_secret": "f74b21c2fc87ad48118b3723372ecfe25aaae730",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/metadata/metadata-deploy.yaml",
+ "hashed_secret": "c27babf45eb0ed87329e69c7d47dba611e859c5d",
"is_verified": false,
- "line_number": 559,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 66
+ }
+ ],
+ "kube/services/monitoring/grafana-values.yaml": [
{
- "hashed_secret": "bc788b9febb8e95114c2e78a9d5297f80bbedb2c",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/monitoring/grafana-values.yaml",
+ "hashed_secret": "2ae868079d293e0a185c671c7bcdac51df36e385",
"is_verified": false,
- "line_number": 564,
- "type": "Base64 High Entropy String"
+ "line_number": 162
},
{
- "hashed_secret": "e9fdc3025cd10bd8aa4508611e6b7b7a9d650a2c",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/monitoring/grafana-values.yaml",
+ "hashed_secret": "5baa61e4c9b93f3f0682250b6cf8331b7ee68fd8",
"is_verified": false,
- "line_number": 575,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 331
+ }
+ ],
+ "kube/services/monitoring/thanos-deploy.yaml": [
{
- "hashed_secret": "36a64bd1be32f031420a87c448636720426e0072",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/monitoring/thanos-deploy.yaml",
+ "hashed_secret": "064376809efc3acda5bd341aca977e149b989696",
"is_verified": false,
- "line_number": 580,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 130
+ }
+ ],
+ "kube/services/ohif-viewer/ohif-viewer-deploy.yaml": [
{
- "hashed_secret": "06a3dc8802aa9b4f2f48ad081cbe64482ce9f491",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/ohif-viewer/ohif-viewer-deploy.yaml",
+ "hashed_secret": "3f87db80519a9ae7d8112f4e0d4cc81441181818",
"is_verified": false,
- "line_number": 585,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 40
+ }
+ ],
+ "kube/services/orthanc/orthanc-deploy.yaml": [
{
- "hashed_secret": "6c8453f18e4aa0280d847454c9a803c12e2d14d7",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/orthanc/orthanc-deploy.yaml",
+ "hashed_secret": "3f87db80519a9ae7d8112f4e0d4cc81441181818",
"is_verified": false,
- "line_number": 590,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 41
+ }
+ ],
+ "kube/services/peregrine/peregrine-canary-deploy.yaml": [
{
- "hashed_secret": "3df46004e168f8d8e3422adfbf0b7c237a41f437",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/peregrine/peregrine-canary-deploy.yaml",
+ "hashed_secret": "6131c35d7eebdbc17a314bef8aac75b87323cff3",
"is_verified": false,
- "line_number": 595,
- "type": "Base64 High Entropy String"
+ "line_number": 61
},
{
- "hashed_secret": "5c270f653b2fcd5b7c700b53f8543df4147a4aba",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/peregrine/peregrine-canary-deploy.yaml",
+ "hashed_secret": "ca253d1c9dece2da0d6fb24ded7bdb849a475966",
"is_verified": false,
- "line_number": 600,
- "type": "Base64 High Entropy String"
+ "line_number": 64
},
{
- "hashed_secret": "98a159a135963e5e65a546879c332b2c3942aec3",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/peregrine/peregrine-canary-deploy.yaml",
+ "hashed_secret": "990a3202b5c94aa5e5997e7dc1a218e457f8b8ec",
"is_verified": false,
- "line_number": 605,
- "type": "Base64 High Entropy String"
+ "line_number": 70
},
{
- "hashed_secret": "58d846ede841bbec0d67a42d03426806635fee2f",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/peregrine/peregrine-canary-deploy.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
"is_verified": false,
- "line_number": 610,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 73
+ }
+ ],
+ "kube/services/peregrine/peregrine-deploy.yaml": [
{
- "hashed_secret": "23e42656fba130d56c20abddb94b6b7bfcad69a8",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/peregrine/peregrine-deploy.yaml",
+ "hashed_secret": "6131c35d7eebdbc17a314bef8aac75b87323cff3",
"is_verified": false,
- "line_number": 618,
- "type": "Base64 High Entropy String"
+ "line_number": 67
},
{
- "hashed_secret": "f883f0bd87d8455814f491e2067bd3f62454c7c2",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/peregrine/peregrine-deploy.yaml",
+ "hashed_secret": "ca253d1c9dece2da0d6fb24ded7bdb849a475966",
"is_verified": false,
- "line_number": 623,
- "type": "Base64 High Entropy String"
+ "line_number": 70
},
{
- "hashed_secret": "8ece0f01da9189bae69a60da116040400bbc10e5",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/peregrine/peregrine-deploy.yaml",
+ "hashed_secret": "990a3202b5c94aa5e5997e7dc1a218e457f8b8ec",
"is_verified": false,
- "line_number": 628,
- "type": "Base64 High Entropy String"
+ "line_number": 76
},
{
- "hashed_secret": "75a3c0b9934bd460ff7af9763edb25d749ab7b4e",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/peregrine/peregrine-deploy.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
"is_verified": false,
- "line_number": 633,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 79
+ }
+ ],
+ "kube/services/pidgin/pidgin-deploy.yaml": [
{
- "hashed_secret": "baac57cb314beab87420d1da6906a1d2377c7d73",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/pidgin/pidgin-deploy.yaml",
+ "hashed_secret": "49af232c7adfcd54a40202e06261396a757e4ddd",
"is_verified": false,
- "line_number": 638,
- "type": "Base64 High Entropy String"
+ "line_number": 59
},
{
- "hashed_secret": "d0a953de593a0a7b26b925a6476d8382cd31cb0e",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/pidgin/pidgin-deploy.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
"is_verified": false,
- "line_number": 654,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 62
+ }
+ ],
+ "kube/services/portal/portal-deploy.yaml": [
{
- "hashed_secret": "8b15238d25347ab18f4cbbe191de9aed597c8ea4",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/portal/portal-deploy.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
"is_verified": false,
- "line_number": 659,
- "type": "Base64 High Entropy String"
+ "line_number": 55
},
{
- "hashed_secret": "1e2ab7a2fd9b6afcbe08afcb9dc652b76cf367d8",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/portal/portal-deploy.yaml",
+ "hashed_secret": "5c5a8e158ad2d8544f73cd5422072d414f497faa",
"is_verified": false,
- "line_number": 668,
- "type": "Base64 High Entropy String"
+ "line_number": 58
},
{
- "hashed_secret": "ae745d719f97b3ddb9791348b1f29ff8208c0c5c",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/portal/portal-deploy.yaml",
+ "hashed_secret": "619551216e129bbc5322678abf9c9210c0327cfb",
"is_verified": false,
- "line_number": 676,
- "type": "Base64 High Entropy String"
+ "line_number": 61
},
{
- "hashed_secret": "b72a53c8bebd6540eeffeba5b0c28965bbb2a664",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/portal/portal-deploy.yaml",
+ "hashed_secret": "e3c7565314f404e3883929f003c65a02a80366e9",
"is_verified": false,
- "line_number": 681,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 67
+ }
+ ],
+ "kube/services/portal/portal-root-deploy.yaml": [
{
- "hashed_secret": "97cbb7fbdfe498c80489e26bcdc78fce5db9b258",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/portal/portal-root-deploy.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
"is_verified": false,
- "line_number": 686,
- "type": "Base64 High Entropy String"
+ "line_number": 55
},
{
- "hashed_secret": "bc98c415b1c6ee93adf8e97a4a536b6342337c19",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/portal/portal-root-deploy.yaml",
+ "hashed_secret": "5c5a8e158ad2d8544f73cd5422072d414f497faa",
"is_verified": false,
- "line_number": 691,
- "type": "Base64 High Entropy String"
+ "line_number": 58
},
{
- "hashed_secret": "5a6baaacb03a030567b857cb8cfe440407e6385e",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/portal/portal-root-deploy.yaml",
+ "hashed_secret": "619551216e129bbc5322678abf9c9210c0327cfb",
"is_verified": false,
- "line_number": 696,
- "type": "Base64 High Entropy String"
+ "line_number": 61
},
{
- "hashed_secret": "e55a8322e5c7485be2f721155d9ed15afc586a4c",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/portal/portal-root-deploy.yaml",
+ "hashed_secret": "e3c7565314f404e3883929f003c65a02a80366e9",
"is_verified": false,
- "line_number": 705,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 67
+ }
+ ],
+ "kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml": [
{
- "hashed_secret": "47709a15a1b02a87f65dfcd5f3e78e0d2206c95f",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml",
+ "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55",
"is_verified": false,
- "line_number": 710,
- "type": "Base64 High Entropy String"
+ "line_number": 74
},
{
- "hashed_secret": "5782d0f39536b22f2c6aa29d3b815a57f43e4800",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml",
+ "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb",
"is_verified": false,
- "line_number": 719,
- "type": "Base64 High Entropy String"
+ "line_number": 77
},
{
- "hashed_secret": "401f90e6afa890c5ee44071351e4a149e7c1f5e0",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml",
+ "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634",
"is_verified": false,
- "line_number": 724,
- "type": "Base64 High Entropy String"
+ "line_number": 80
},
{
- "hashed_secret": "51f38b23af543da8b637a3bd62f5fb2c460e3b3d",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
"is_verified": false,
- "line_number": 729,
- "type": "Base64 High Entropy String"
+ "line_number": 90
},
{
- "hashed_secret": "8287678ab8009ae16b02930c9e260d1f28578fbe",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml",
+ "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d",
"is_verified": false,
- "line_number": 734,
- "type": "Base64 High Entropy String"
+ "line_number": 93
},
{
- "hashed_secret": "d4c050e6914eb68a5c657fb8bb09f6ac5eae1e86",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml",
+ "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb",
"is_verified": false,
- "line_number": 739,
- "type": "Base64 High Entropy String"
+ "line_number": 96
},
{
- "hashed_secret": "922ac7db4914c20910496a41c474631928d6c2f2",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml",
+ "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457",
"is_verified": false,
- "line_number": 750,
- "type": "Base64 High Entropy String"
+ "line_number": 99
},
{
- "hashed_secret": "f7f85d9f7c87f1e576dcaf4cf50f35728f9a3265",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml",
+ "hashed_secret": "0849046cdafcdb17f5a4bf5c528430d5e04ad295",
"is_verified": false,
- "line_number": 771,
- "type": "Base64 High Entropy String"
+ "line_number": 102
},
{
- "hashed_secret": "d7966031d8525b080d7234049cbb040ac9a3f908",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
"is_verified": false,
- "line_number": 798,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 105
+ }
+ ],
+ "kube/services/qa-dashboard/qa-dashboard-deployment.yaml": [
{
- "hashed_secret": "ff3d359d573f78d89424e03ec8688eee19305f9f",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/qa-dashboard/qa-dashboard-deployment.yaml",
+ "hashed_secret": "253939a955a575ac69f409e5914dd0191b704760",
"is_verified": false,
- "line_number": 808,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 63
+ }
+ ],
+ "kube/services/qabot/qabot-deploy.yaml": [
{
- "hashed_secret": "949b4ff40f26797f9290fe46eaa8691caef5c5ab",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/qabot/qabot-deploy.yaml",
+ "hashed_secret": "a9fa7aa8c08b647c3fb696e6598642d4a63e25be",
"is_verified": false,
- "line_number": 817,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 86
+ }
+ ],
+ "kube/services/requestor/requestor-deploy.yaml": [
{
- "hashed_secret": "ce4ea19f66e9140bdb497b19c6ae94c32ee565f0",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/requestor/requestor-deploy.yaml",
+ "hashed_secret": "15debe4170aa5b89858d939f4c0644307ae7789b",
"is_verified": false,
- "line_number": 825,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 61
+ }
+ ],
+ "kube/services/revproxy/gen3.nginx.conf/indexd-service.conf": [
{
- "hashed_secret": "f6368525e9e22577efc8d8b737794e845958ba92",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/revproxy/gen3.nginx.conf/indexd-service.conf",
+ "hashed_secret": "f89523833036f85fed37ce3ebf25492189bc9397",
"is_verified": false,
- "line_number": 834,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 41
+ }
+ ],
+ "kube/services/revproxy/gen3.nginx.conf/metadata-service.conf": [
{
- "hashed_secret": "1508bbaf29927b5348d4df62823dab122a0d3b48",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/revproxy/gen3.nginx.conf/metadata-service.conf",
+ "hashed_secret": "18c0871af26eb9875c0f840b13211f097c133fd2",
"is_verified": false,
- "line_number": 839,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 24
+ }
+ ],
+ "kube/services/revproxy/helpers.js": [
{
- "hashed_secret": "12917e7235ce486ca51a296b896afa5e3b4fda54",
- "is_secret": false,
+ "type": "Base64 High Entropy String",
+ "filename": "kube/services/revproxy/helpers.js",
+ "hashed_secret": "1d278d3c888d1a2fa7eed622bfc02927ce4049af",
"is_verified": false,
- "line_number": 844,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 10
+ }
+ ],
+ "kube/services/revproxy/helpersTest.js": [
{
- "hashed_secret": "49e05eb75fd04d8f44cf235d4e8eddc30a2b93e5",
- "is_secret": false,
+ "type": "Base64 High Entropy String",
+ "filename": "kube/services/revproxy/helpersTest.js",
+ "hashed_secret": "389c3ec21b7325359051e97ff569b078843d2d37",
"is_verified": false,
- "line_number": 849,
- "type": "Base64 High Entropy String"
+ "line_number": 19
},
{
- "hashed_secret": "aa8ea120ddc5aaa27cb02e0b04ac1c53b249a724",
- "is_secret": false,
+ "type": "JSON Web Token",
+ "filename": "kube/services/revproxy/helpersTest.js",
+ "hashed_secret": "e029d4904cc728879d70030572bf37d4510367cb",
"is_verified": false,
- "line_number": 869,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 22
+ }
+ ],
+ "kube/services/revproxy/revproxy-deploy.yaml": [
{
- "hashed_secret": "b3e00452fd69737cc747d0661fa3b3949a4a0805",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/revproxy/revproxy-deploy.yaml",
+ "hashed_secret": "c7a87a61893a647e29289845cb51e61afb06800b",
"is_verified": false,
- "line_number": 876,
- "type": "Base64 High Entropy String"
+ "line_number": 74
},
{
- "hashed_secret": "af2ceb518ddc689b0e2a03ffebb64d4499817c17",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/revproxy/revproxy-deploy.yaml",
+ "hashed_secret": "b3a4e2dea4c1fae8c58a07a84065b73b3a2d831c",
"is_verified": false,
- "line_number": 887,
- "type": "Base64 High Entropy String"
+ "line_number": 77
},
{
- "hashed_secret": "7da94b235f996b5c65b66c3e70b5eeaf97bab5d4",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/revproxy/revproxy-deploy.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
"is_verified": false,
- "line_number": 892,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 80
+ }
+ ],
+ "kube/services/sftp/sftp-deploy.yaml": [
{
- "hashed_secret": "f8363d7113ba35fd06b33afe20c8ad21a3202197",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/sftp/sftp-deploy.yaml",
+ "hashed_secret": "9fdebf62e477d59d25730744c8b3089c67c3db85",
"is_verified": false,
- "line_number": 900,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 39
+ }
+ ],
+ "kube/services/sheepdog/sheepdog-canary-deploy.yaml": [
{
- "hashed_secret": "6902b24068ea12c3a3e31596614aa6fa0fba3c39",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/sheepdog/sheepdog-canary-deploy.yaml",
+ "hashed_secret": "ec9c944c51e87322de8d22e3ca9e2be1ad8fee0d",
"is_verified": false,
- "line_number": 908,
- "type": "Base64 High Entropy String"
+ "line_number": 58
},
{
- "hashed_secret": "2c732c0a0dccfc1588888172188ce9a1abb7166e",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/sheepdog/sheepdog-canary-deploy.yaml",
+ "hashed_secret": "79496491225eda4a7be9fcddee2825c85b1535cc",
"is_verified": false,
- "line_number": 916,
- "type": "Base64 High Entropy String"
+ "line_number": 61
},
{
- "hashed_secret": "c59aac9ab2704f627d29c762e716ba84b15be3f1",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/sheepdog/sheepdog-canary-deploy.yaml",
+ "hashed_secret": "e43756046ad1763d6946575fed0e05130a154bd2",
"is_verified": false,
- "line_number": 921,
- "type": "Base64 High Entropy String"
+ "line_number": 67
},
{
- "hashed_secret": "20249a3c96028e5ad19143d86ec5d2ee233935ed",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/sheepdog/sheepdog-canary-deploy.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
"is_verified": false,
- "line_number": 937,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 70
+ }
+ ],
+ "kube/services/sheepdog/sheepdog-deploy.yaml": [
{
- "hashed_secret": "2a57a9814486d6f83257ec94e65d1024819611b8",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/sheepdog/sheepdog-deploy.yaml",
+ "hashed_secret": "ec9c944c51e87322de8d22e3ca9e2be1ad8fee0d",
"is_verified": false,
- "line_number": 942,
- "type": "Base64 High Entropy String"
+ "line_number": 63
},
{
- "hashed_secret": "d5e822897b1f37e6ce1a864e2ba9af8f9bfc5539",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/sheepdog/sheepdog-deploy.yaml",
+ "hashed_secret": "79496491225eda4a7be9fcddee2825c85b1535cc",
"is_verified": false,
- "line_number": 950,
- "type": "Base64 High Entropy String"
+ "line_number": 66
},
{
- "hashed_secret": "dbee1beb29275ad50ef0a68067ca144985beca2c",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/sheepdog/sheepdog-deploy.yaml",
+ "hashed_secret": "e43756046ad1763d6946575fed0e05130a154bd2",
"is_verified": false,
- "line_number": 957,
- "type": "Base64 High Entropy String"
+ "line_number": 72
},
{
- "hashed_secret": "b0cb4b5554183f2c7bc1ca25d902db5769798a7a",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/sheepdog/sheepdog-deploy.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
"is_verified": false,
- "line_number": 962,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 75
+ }
+ ],
+ "kube/services/shiny/shiny-deploy.yaml": [
{
- "hashed_secret": "29f79b77802802c5ae2d3c2acb9179280de37914",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/shiny/shiny-deploy.yaml",
+ "hashed_secret": "327a1bbc6dc0ce857472ee9162a3415133862d50",
"is_verified": false,
- "line_number": 967,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 43
+ }
+ ],
+ "kube/services/ssjdispatcher/ssjdispatcher-deploy.yaml": [
{
- "hashed_secret": "18469023a89dd192b5275d8b955c9fd2202e0c03",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/ssjdispatcher/ssjdispatcher-deploy.yaml",
+ "hashed_secret": "7f932449df74fc78573fea502df8a484aef3f69d",
"is_verified": false,
- "line_number": 983,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 61
+ }
+ ],
+ "kube/services/superset/superset-deploy.yaml": [
{
- "hashed_secret": "0d3ce7468071b4e48ba9cd014ade7037dc57ef41",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/superset/superset-deploy.yaml",
+ "hashed_secret": "3e9d1737117ff62b23e37aedc72b522b0134997a",
"is_verified": false,
- "line_number": 991,
- "type": "Base64 High Entropy String"
+ "line_number": 235
},
{
- "hashed_secret": "955d2d24c472b4eb0b4488f935a0f65e38001df8",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/superset/superset-deploy.yaml",
+ "hashed_secret": "6ac08eaa58d425783ff8b5a38fe16ee66c0bce15",
"is_verified": false,
- "line_number": 996,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 311
+ }
+ ],
+ "kube/services/superset/superset-redis.yaml": [
{
- "hashed_secret": "42e05c82cd06a9ed1d15e0f472c2efc4b3254cae",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/superset/superset-redis.yaml",
+ "hashed_secret": "9fe1c31809da38c55b2b64bfab47b92bc5f6b7b9",
"is_verified": false,
- "line_number": 1010,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 265
+ }
+ ],
+ "kube/services/superset/values.yaml": [
{
- "hashed_secret": "7a87fb248397359e9c6ca6e46f39805789059102",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/superset/values.yaml",
+ "hashed_secret": "9a09d4081ddc128a80384712ce6df3578e6bc58e",
"is_verified": false,
- "line_number": 1018,
- "type": "Base64 High Entropy String"
+ "line_number": 173
},
{
- "hashed_secret": "7fbf450bf4ee54f013454f70af3a9743c0909f54",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/superset/values.yaml",
+ "hashed_secret": "118c413f3fc929a1624f4c3e1da1e3d24377a693",
"is_verified": false,
- "line_number": 1034,
- "type": "Base64 High Entropy String"
+ "line_number": 299
},
{
- "hashed_secret": "df8e0babfad52a541f6e470cf3a143402c2c2a1e",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/superset/values.yaml",
+ "hashed_secret": "d2a8d1ddfa75398366cff06545380c73481ec17d",
"is_verified": false,
- "line_number": 1039,
- "type": "Base64 High Entropy String"
+ "line_number": 445
},
{
- "hashed_secret": "6f9bfb49cb818d2fe07592515e4c3f7a0bbd7e0e",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/superset/values.yaml",
+ "hashed_secret": "98a84a63e5633d17e3b27b69695f87aa7189e9dc",
"is_verified": false,
- "line_number": 1044,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 459
+ }
+ ],
+ "kube/services/thor/thor-deploy.yaml": [
{
- "hashed_secret": "9e897caf5658aea914e1034f46663cadb5a76348",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/thor/thor-deploy.yaml",
+ "hashed_secret": "1f3f96a3887209d0dda357e5516231ee9c5cd9a7",
"is_verified": false,
- "line_number": 1054,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 100
+ }
+ ],
+ "kube/services/tube/tube-deploy.yaml": [
{
- "hashed_secret": "3aec99f39b829f94874ccd0a0d90315c6690cb94",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/tube/tube-deploy.yaml",
+ "hashed_secret": "ca253d1c9dece2da0d6fb24ded7bdb849a475966",
"is_verified": false,
- "line_number": 1064,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 58
+ }
+ ],
+ "kube/services/ws-storage/ws-storage-deploy.yaml": [
{
- "hashed_secret": "eca5fc6e4f5f895143d3fcedefc42dfe6e79f918",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/ws-storage/ws-storage-deploy.yaml",
+ "hashed_secret": "ec2d9395e11f353370a4abac21a1565641b35ce9",
"is_verified": false,
- "line_number": 1069,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 66
+ }
+ ],
+ "kube/services/wts/wts-deploy.yaml": [
{
- "hashed_secret": "307a947aa422c67fdefb07178198a004fb2c0d94",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "kube/services/wts/wts-deploy.yaml",
+ "hashed_secret": "5de687ae886f19c3cb68d4980e3f2e77cca3db9e",
"is_verified": false,
- "line_number": 1074,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 65
+ }
+ ],
+ "packer/buildAll.sh": [
{
- "hashed_secret": "0ba2fc9a137313ae1fdda2b5476dedf0595bda3a",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "packer/buildAll.sh",
+ "hashed_secret": "6e1d66a1596528c308e601c10aa0b92d53606ab9",
"is_verified": false,
- "line_number": 1083,
- "type": "Base64 High Entropy String"
+ "line_number": 15
}
],
- "tf_files/aws/cognito/README.md": [
+ "packer/variables.example.json": [
{
- "hashed_secret": "f6920f370a30262b7dd70e97293c73ec89739b70",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "packer/variables.example.json",
+ "hashed_secret": "a3a0648a036bebf78ba1a1eb498a66081059da10",
"is_verified": false,
- "line_number": 106,
- "type": "Secret Keyword"
+ "line_number": 5
}
],
"tf_files/aws/commons/README.md": [
{
- "hashed_secret": "d02e53411e8cb4cd709778f173f7bc9a3455f8ed",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "tf_files/aws/commons/README.md",
+ "hashed_secret": "5f02a3fb14ab1ce5c18c362b04b8ffc603ea5951",
"is_verified": false,
- "line_number": 60,
- "type": "Secret Keyword"
+ "line_number": 60
},
{
- "hashed_secret": "9dc0da3613af850c5a018b0a88a5626fb8888e4e",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "tf_files/aws/commons/README.md",
+ "hashed_secret": "49cfceed8aa8df159e53aa5c5951cad48a3f1216",
"is_verified": false,
- "line_number": 78,
- "type": "Secret Keyword"
+ "line_number": 67
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "tf_files/aws/commons/README.md",
+ "hashed_secret": "18ad13589ca5fb3c432d7d9f0fe49f8ed6e2c478",
+ "is_verified": false,
+ "line_number": 70
}
],
"tf_files/aws/eks/sample.tfvars": [
{
+ "type": "Hex High Entropy String",
+ "filename": "tf_files/aws/eks/sample.tfvars",
"hashed_secret": "83c1003f406f34fba4d6279a948fee3abc802884",
- "is_secret": false,
"is_verified": false,
- "line_number": 107,
- "type": "Hex High Entropy String"
+ "line_number": 107
}
],
"tf_files/aws/eks/variables.tf": [
{
+ "type": "Hex High Entropy String",
+ "filename": "tf_files/aws/eks/variables.tf",
"hashed_secret": "83c1003f406f34fba4d6279a948fee3abc802884",
- "is_secret": false,
"is_verified": false,
- "line_number": 133,
- "type": "Hex High Entropy String"
+ "line_number": 133
}
],
"tf_files/aws/modules/common-logging/README.md": [
{
+ "type": "Base64 High Entropy String",
+ "filename": "tf_files/aws/modules/common-logging/README.md",
"hashed_secret": "83442aa5a16cb1992731c32367ef464564388017",
- "is_secret": false,
- "is_verified": false,
- "line_number": 57,
- "type": "Base64 High Entropy String"
- },
- {
- "hashed_secret": "fd4a4637ac99de2c1d89155d66d1f3de15d231a2",
- "is_secret": false,
"is_verified": false,
- "line_number": 59,
- "type": "Hex High Entropy String"
+ "line_number": 57
}
],
"tf_files/aws/modules/common-logging/lambda_function.py": [
{
+ "type": "Hex High Entropy String",
+ "filename": "tf_files/aws/modules/common-logging/lambda_function.py",
"hashed_secret": "061765d6854d72f03a6527610d5b6822c9d516de",
- "is_secret": false,
"is_verified": false,
- "line_number": 18,
- "type": "Hex High Entropy String"
+ "line_number": 18
},
{
+ "type": "Base64 High Entropy String",
+ "filename": "tf_files/aws/modules/common-logging/lambda_function.py",
"hashed_secret": "61df81a188bb4dba6ae6128ff7e2c9c6a6f736ef",
- "is_secret": false,
"is_verified": false,
- "line_number": 18,
- "type": "Base64 High Entropy String"
+ "line_number": 18
+ },
+ {
+ "type": "Base64 High Entropy String",
+ "filename": "tf_files/aws/modules/common-logging/lambda_function.py",
+ "hashed_secret": "a4752db26b4774d3429878f36ceb7b61805ffd94",
+ "is_verified": false,
+ "line_number": 18
},
{
- "hashed_secret": "a4667450661f32f7ad0f06e2f893a8fee9f18e38",
- "is_secret": false,
+ "type": "Hex High Entropy String",
+ "filename": "tf_files/aws/modules/common-logging/lambda_function.py",
+ "hashed_secret": "b979d8d0c0e8413c20a5597f789e31f0a2b2ff3a",
"is_verified": false,
- "line_number": 18,
- "type": "Hex High Entropy String"
+ "line_number": 18
},
{
+ "type": "Hex High Entropy String",
+ "filename": "tf_files/aws/modules/common-logging/lambda_function.py",
"hashed_secret": "4f9fd96d3926f2c53ab0261d33f1d1a85a6a77ff",
- "is_secret": false,
"is_verified": false,
- "line_number": 30,
- "type": "Hex High Entropy String"
+ "line_number": 30
}
],
"tf_files/aws/modules/common-logging/testLambda.py": [
{
+ "type": "Hex High Entropy String",
+ "filename": "tf_files/aws/modules/common-logging/testLambda.py",
"hashed_secret": "061765d6854d72f03a6527610d5b6822c9d516de",
- "is_secret": false,
"is_verified": false,
- "line_number": 5,
- "type": "Hex High Entropy String"
+ "line_number": 5
},
{
+ "type": "Base64 High Entropy String",
+ "filename": "tf_files/aws/modules/common-logging/testLambda.py",
"hashed_secret": "61df81a188bb4dba6ae6128ff7e2c9c6a6f736ef",
- "is_secret": false,
"is_verified": false,
- "line_number": 5,
- "type": "Base64 High Entropy String"
+ "line_number": 5
+ },
+ {
+ "type": "Base64 High Entropy String",
+ "filename": "tf_files/aws/modules/common-logging/testLambda.py",
+ "hashed_secret": "a4752db26b4774d3429878f36ceb7b61805ffd94",
+ "is_verified": false,
+ "line_number": 5
},
{
- "hashed_secret": "a4667450661f32f7ad0f06e2f893a8fee9f18e38",
- "is_secret": false,
+ "type": "Hex High Entropy String",
+ "filename": "tf_files/aws/modules/common-logging/testLambda.py",
+ "hashed_secret": "b979d8d0c0e8413c20a5597f789e31f0a2b2ff3a",
"is_verified": false,
- "line_number": 5,
- "type": "Hex High Entropy String"
+ "line_number": 5
+ },
+ {
+ "type": "Hex High Entropy String",
+ "filename": "tf_files/aws/modules/common-logging/testLambda.py",
+ "hashed_secret": "4f9fd96d3926f2c53ab0261d33f1d1a85a6a77ff",
+ "is_verified": false,
+ "line_number": 10
}
],
"tf_files/aws/modules/eks/variables.tf": [
{
+ "type": "Hex High Entropy String",
+ "filename": "tf_files/aws/modules/eks/variables.tf",
"hashed_secret": "83c1003f406f34fba4d6279a948fee3abc802884",
- "is_secret": false,
"is_verified": false,
- "line_number": 113,
- "type": "Hex High Entropy String"
+ "line_number": 113
}
],
"tf_files/aws/modules/management-logs/README.md": [
{
+ "type": "Base64 High Entropy String",
+ "filename": "tf_files/aws/modules/management-logs/README.md",
"hashed_secret": "83442aa5a16cb1992731c32367ef464564388017",
- "is_secret": false,
"is_verified": false,
- "line_number": 54,
- "type": "Base64 High Entropy String"
- },
- {
- "hashed_secret": "fd4a4637ac99de2c1d89155d66d1f3de15d231a2",
- "is_secret": false,
- "is_verified": false,
- "line_number": 56,
- "type": "Hex High Entropy String"
+ "line_number": 54
}
],
"tf_files/aws/modules/management-logs/lambda_function.py": [
{
+ "type": "Hex High Entropy String",
+ "filename": "tf_files/aws/modules/management-logs/lambda_function.py",
"hashed_secret": "061765d6854d72f03a6527610d5b6822c9d516de",
- "is_secret": false,
"is_verified": false,
- "line_number": 18,
- "type": "Hex High Entropy String"
+ "line_number": 18
},
{
+ "type": "Base64 High Entropy String",
+ "filename": "tf_files/aws/modules/management-logs/lambda_function.py",
"hashed_secret": "61df81a188bb4dba6ae6128ff7e2c9c6a6f736ef",
- "is_secret": false,
"is_verified": false,
- "line_number": 18,
- "type": "Base64 High Entropy String"
+ "line_number": 18
+ },
+ {
+ "type": "Base64 High Entropy String",
+ "filename": "tf_files/aws/modules/management-logs/lambda_function.py",
+ "hashed_secret": "a4752db26b4774d3429878f36ceb7b61805ffd94",
+ "is_verified": false,
+ "line_number": 18
},
{
- "hashed_secret": "a4667450661f32f7ad0f06e2f893a8fee9f18e38",
- "is_secret": false,
+ "type": "Hex High Entropy String",
+ "filename": "tf_files/aws/modules/management-logs/lambda_function.py",
+ "hashed_secret": "b979d8d0c0e8413c20a5597f789e31f0a2b2ff3a",
"is_verified": false,
- "line_number": 18,
- "type": "Hex High Entropy String"
+ "line_number": 18
},
{
+ "type": "Hex High Entropy String",
+ "filename": "tf_files/aws/modules/management-logs/lambda_function.py",
"hashed_secret": "4f9fd96d3926f2c53ab0261d33f1d1a85a6a77ff",
- "is_secret": false,
"is_verified": false,
- "line_number": 30,
- "type": "Hex High Entropy String"
+ "line_number": 30
}
],
"tf_files/aws/modules/management-logs/testLambda.py": [
{
+ "type": "Hex High Entropy String",
+ "filename": "tf_files/aws/modules/management-logs/testLambda.py",
"hashed_secret": "061765d6854d72f03a6527610d5b6822c9d516de",
- "is_secret": false,
"is_verified": false,
- "line_number": 5,
- "type": "Hex High Entropy String"
+ "line_number": 5
},
{
+ "type": "Base64 High Entropy String",
+ "filename": "tf_files/aws/modules/management-logs/testLambda.py",
"hashed_secret": "61df81a188bb4dba6ae6128ff7e2c9c6a6f736ef",
- "is_secret": false,
"is_verified": false,
- "line_number": 5,
- "type": "Base64 High Entropy String"
+ "line_number": 5
},
{
- "hashed_secret": "a4667450661f32f7ad0f06e2f893a8fee9f18e38",
- "is_secret": false,
+ "type": "Base64 High Entropy String",
+ "filename": "tf_files/aws/modules/management-logs/testLambda.py",
+ "hashed_secret": "a4752db26b4774d3429878f36ceb7b61805ffd94",
"is_verified": false,
- "line_number": 5,
- "type": "Hex High Entropy String"
+ "line_number": 5
},
{
- "hashed_secret": "3cf8eb4e9254e1d6cc523da01f8b798b9a83101a",
- "is_secret": false,
+ "type": "Hex High Entropy String",
+ "filename": "tf_files/aws/modules/management-logs/testLambda.py",
+ "hashed_secret": "b979d8d0c0e8413c20a5597f789e31f0a2b2ff3a",
"is_verified": false,
- "line_number": 6,
- "type": "Base64 High Entropy String"
+ "line_number": 5
},
{
- "hashed_secret": "51118900cd675df1b44f254057398f3e52902a5d",
- "is_secret": false,
+ "type": "Base64 High Entropy String",
+ "filename": "tf_files/aws/modules/management-logs/testLambda.py",
+ "hashed_secret": "3cf8eb4e9254e1d6cc523da01f8b798b9a83101a",
"is_verified": false,
- "line_number": 6,
- "type": "Hex High Entropy String"
+ "line_number": 6
},
{
+ "type": "Hex High Entropy String",
+ "filename": "tf_files/aws/modules/management-logs/testLambda.py",
"hashed_secret": "60a6dfc8d43cd2f5c6292899fc2f94f2d4fc32c4",
- "is_secret": false,
"is_verified": false,
- "line_number": 6,
- "type": "Hex High Entropy String"
+ "line_number": 6
+ },
+ {
+ "type": "Base64 High Entropy String",
+ "filename": "tf_files/aws/modules/management-logs/testLambda.py",
+ "hashed_secret": "d484ccb4ced21e0149078377f14b913bf5c613d0",
+ "is_verified": false,
+ "line_number": 6
}
],
"tf_files/aws/slurm/README.md": [
{
- "hashed_secret": "fd85d792fa56981cf6a8d2a5c0857c74af86e99d",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "tf_files/aws/slurm/README.md",
+ "hashed_secret": "c16686250cd583de64e02a47a8b194cd5578b2a1",
"is_verified": false,
- "line_number": 83,
- "type": "Secret Keyword"
+ "line_number": 83
}
],
"tf_files/azure/cloud.tf": [
{
- "hashed_secret": "7c1a4b52b64e4106041971c345a1f3eab58fb2a4",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "tf_files/azure/cloud.tf",
+ "hashed_secret": "38d930120a56321ceaa147b2bc1f19db53a0b993",
"is_verified": false,
- "line_number": 424,
- "type": "Secret Keyword"
+ "line_number": 361
}
],
"tf_files/gcp-bwg/roots/commons_setup/variables/answerfile-commons_setup-001.template.tfvars": [
{
+ "type": "Secret Keyword",
+ "filename": "tf_files/gcp-bwg/roots/commons_setup/variables/answerfile-commons_setup-001.template.tfvars",
"hashed_secret": "f865b53623b121fd34ee5426c792e5c33af8c227",
- "is_secret": false,
"is_verified": false,
- "line_number": 231,
- "type": "Secret Keyword"
+ "line_number": 231
}
],
"tf_files/gcp-bwg/roots/templates/answerfile-commons_setup-001.template.tfvars": [
{
+ "type": "Secret Keyword",
+ "filename": "tf_files/gcp-bwg/roots/templates/answerfile-commons_setup-001.template.tfvars",
"hashed_secret": "f865b53623b121fd34ee5426c792e5c33af8c227",
- "is_secret": false,
"is_verified": false,
- "line_number": 231,
- "type": "Secret Keyword"
+ "line_number": 231
}
],
"tf_files/gcp-bwg/roots/templates/answerfile-env-tenant.user.tfvars_NO_APP_SETUP": [
{
+ "type": "Secret Keyword",
+ "filename": "tf_files/gcp-bwg/roots/templates/answerfile-env-tenant.user.tfvars_NO_APP_SETUP",
"hashed_secret": "f865b53623b121fd34ee5426c792e5c33af8c227",
- "is_secret": false,
"is_verified": false,
- "line_number": 262,
- "type": "Secret Keyword"
+ "line_number": 262
}
],
- "tf_files/gcp/commons/sample.tfvars": [
+ "tf_files/gcp/commons/root.tf": [
{
- "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "tf_files/gcp/commons/root.tf",
+ "hashed_secret": "013b6be0bd7ef38a9ee3472cec65c208a19421e6",
"is_verified": false,
- "line_number": 11,
- "type": "Secret Keyword"
- },
+ "line_number": 65
+ }
+ ],
+ "tf_files/gcp/commons/sample.tfvars": [
{
- "hashed_secret": "8db3b325254b6389ca194d829d2fc923dc0a945d",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "tf_files/gcp/commons/sample.tfvars",
+ "hashed_secret": "6b44a330b450ee550c081410c6b705dfeaa105ce",
"is_verified": false,
- "line_number": 26,
- "type": "Secret Keyword"
+ "line_number": 26
},
{
- "hashed_secret": "253c7b5e7c83a86346fc4501495b130813f08105",
- "is_secret": false,
- "is_verified": false,
- "line_number": 37,
- "type": "Secret Keyword"
- }
- ],
- "tf_files/shared/modules/k8s_configs/creds.tpl": [
- {
- "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c",
- "is_secret": false,
+ "type": "Secret Keyword",
+ "filename": "tf_files/gcp/commons/sample.tfvars",
+ "hashed_secret": "791191ef9eafc75f5dd28e37df837b4991556876",
"is_verified": false,
- "line_number": 8,
- "type": "Secret Keyword"
+ "line_number": 31
}
]
},
- "version": "0.13.1",
- "word_list": {
- "file": null,
- "hash": null
- }
+ "generated_at": "2024-03-07T21:26:14Z"
}
diff --git a/Docker/awshelper/Dockerfile b/Docker/awshelper/Dockerfile
index d85d23082..f3dd7b60e 100644
--- a/Docker/awshelper/Dockerfile
+++ b/Docker/awshelper/Dockerfile
@@ -38,6 +38,9 @@ RUN apt-get update && apt-get upgrade -y \
wget \
gettext-base
+# Can remove once https://github.com/yaml/pyyaml/issues/724 is resolved
+RUN pip install pyyaml==5.3.1
+
RUN python3 -m pip install --upgrade pip \
&& python3 -m pip install --upgrade setuptools \
&& python3 -m pip install -U crcmod \
@@ -56,15 +59,9 @@ RUN export CLOUD_SDK_REPO="cloud-sdk" && \
curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - && \
curl -sL https://deb.nodesource.com/setup_14.x | bash - && \
apt-get update && \
- apt-get install -y google-cloud-sdk \
- google-cloud-sdk-cbt \
- kubectl && \
+ apt-get install -y kubectl && \
apt-get install -y --no-install-recommends nodejs && \
- rm -rf /var/lib/apt/lists/* \
- gcloud config set core/disable_usage_reporting true && \
- gcloud config set component_manager/disable_update_check true && \
- gcloud config set metrics/environment github_docker_image && \
- gcloud --version && \
+ rm -rf /var/lib/apt/lists/* && \
kubectl version --client && \
apt-get clean && \
rm -rf /var/lib/apt/lists/* /var/log/*
diff --git a/Docker/jenkins/Jenkins-CI-Worker/Dockerfile b/Docker/jenkins/Jenkins-CI-Worker/Dockerfile
index afb1fca9f..6eeb8f4fd 100644
--- a/Docker/jenkins/Jenkins-CI-Worker/Dockerfile
+++ b/Docker/jenkins/Jenkins-CI-Worker/Dockerfile
@@ -1,11 +1,10 @@
-FROM jenkins/jnlp-slave:4.13.3-1-jdk11
+FROM jenkins/inbound-agent:jdk21
USER root
ENV DEBIAN_FRONTEND=noninteractive
-# install python
-RUN set -xe && apt-get update && apt-get install -y apt-utils dnsutils python python-setuptools python-dev python-pip python3 python3-pip python3-venv build-essential zip unzip jq less vim gettext-base wget
+RUN set -xe && apt-get update && apt-get install -y apt-utils dnsutils build-essential zip unzip jq less vim gettext-base
RUN set -xe && apt-get update \
&& apt-get install -y lsb-release \
@@ -16,7 +15,6 @@ RUN set -xe && apt-get update \
libffi-dev \
libssl-dev \
libghc-regex-pcre-dev \
- linux-headers-amd64 \
libcurl4-openssl-dev \
libncurses5-dev \
libncursesw5-dev \
@@ -27,20 +25,19 @@ RUN set -xe && apt-get update \
libbz2-dev \
libexpat1-dev \
liblzma-dev \
- python-virtualenv \
lua5.3 \
r-base \
software-properties-common \
sudo \
tk-dev \
+ wget \
zlib1g-dev \
zsh \
ca-certificates-java \
- openjdk-11-jre-headless \
&& ln -s /usr/bin/lua5.3 /usr/local/bin/lua
-# Use jdk11
+# Use the JDK shipped with the jenkins/inbound-agent:jdk21 base image
-ENV JAVA_HOME="/usr/lib/jvm/java-11-openjdk-amd64"
+ENV JAVA_HOME="/opt/java/openjdk"
ENV PATH="$JAVA_HOME/bin:$PATH"
COPY ./certfix.sh /certfix.sh
@@ -56,32 +53,34 @@ RUN export CLOUD_SDK_REPO="cloud-sdk-$(lsb_release -c -s)" \
google-cloud-sdk-cbt \
kubectl
+# install go - https://go.dev/doc/install
+RUN wget https://go.dev/dl/go1.21.0.linux-amd64.tar.gz \
+ && rm -rf /usr/local/go \
+ && tar -C /usr/local -xzf go1.21.0.linux-amd64.tar.gz
+ENV PATH="$PATH:/usr/local/go/bin"
+RUN go version
+
#
# install docker tools:
-# * https://docs.docker.com/install/linux/docker-ce/debian/#install-docker-ce-1
-# * https://docs.docker.com/compose/install/#install-compose
#
-RUN curl -fsSL https://download.docker.com/linux/debian/gpg | apt-key add - \
- && add-apt-repository \
- "deb [arch=amd64] https://download.docker.com/linux/debian \
- $(lsb_release -cs) \
- stable" \
- && apt-get update \
- && apt-get install -y docker-ce \
- && curl -L "https://github.com/docker/compose/releases/download/1.23.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose \
- && chmod a+rx /usr/local/bin/docker-compose
+RUN sudo install -m 0755 -d /etc/apt/keyrings \
+ && curl -fsSL https://download.docker.com/linux/debian/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg \
+ && sudo chmod a+r /etc/apt/keyrings/docker.gpg \
+ && echo \
+ "deb [arch="$(dpkg --print-architecture)" signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian \
+ "$(. /etc/os-release && echo "$VERSION_CODENAME")" stable" | \
+ sudo tee /etc/apt/sources.list.d/docker.list > /dev/null \
+ && apt-get update && apt-get install -y docker-ce
# install nodejs
RUN curl -sL https://deb.nodesource.com/setup_14.x | bash -
-RUN apt-get update && apt-get install -y nodejs
+RUN apt-get update && apt-get install -y nodejs npm
-# add psql: https://www.postgresql.org/download/linux/debian/
-RUN DISTRO="$(lsb_release -c -s)" \
- && echo "deb http://apt.postgresql.org/pub/repos/apt/ ${DISTRO}-pgdg main" > /etc/apt/sources.list.d/pgdg.list \
- && wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - \
- && apt-get update \
- && apt-get install -y postgresql-client-13 libpq-dev \
- && rm -rf /var/lib/apt/lists/*
+# Install postgres 13 client
+RUN curl -fsSL https://www.postgresql.org/media/keys/ACCC4CF8.asc| gpg --dearmor -o /etc/apt/trusted.gpg.d/postgresql.gpg && \
+ echo "deb http://apt.postgresql.org/pub/repos/apt/ `lsb_release -cs`-pgdg main" | tee /etc/apt/sources.list.d/pgdg.list && \
+ apt-get update && \
+ apt-get install -y postgresql-client-13
# Copy sh script responsible for installing Python
COPY install-python3.8.sh /root/tmp/install-python3.8.sh
@@ -98,7 +97,7 @@ RUN sed -i 's/python3/python3.8/' /usr/bin/lsb_release && \
sed -i 's/python3/python3.8/' /usr/bin/add-apt-repository
# install aws cli, poetry, pytest, etc.
-RUN set -xe && python3.8 -m pip install --upgrade pip && python3.8 -m pip install awscli --upgrade && python3.8 -m pip install pytest --upgrade && python3.8 -m pip install poetry && python3.8 -m pip install PyYAML --upgrade && python3.8 -m pip install lxml --upgrade && python3.8 -m pip install yq --upgrade && python3.8 -m pip install datadog --upgrade
+RUN set -xe && python3.8 -m pip install --upgrade pip setuptools && python3.8 -m pip install awscli --upgrade && python3.8 -m pip install pytest --upgrade && python3.8 -m pip install poetry && python3.8 -m pip install PyYAML --upgrade && python3.8 -m pip install lxml --upgrade && python3.8 -m pip install yq --upgrade && python3.8 -m pip install datadog --upgrade
# install terraform
RUN curl -o /tmp/terraform.zip https://releases.hashicorp.com/terraform/0.11.15/terraform_0.11.15_linux_amd64.zip \
@@ -117,6 +116,9 @@ RUN curl -sS -o - https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-ke
&& apt-get -y update \
&& apt-get -y install google-chrome-stable
+# data-simulator needs "/usr/share/dict/words" to generate data that isn't random strings
+RUN apt-get install --reinstall wamerican
+
# update /etc/sudoers
RUN sed 's/^%sudo/#%sudo/' /etc/sudoers > /etc/sudoers.bak \
&& /bin/echo -e "\n%sudo ALL=(ALL:ALL) NOPASSWD:ALL\n" >> /etc/sudoers.bak \
diff --git a/Docker/jenkins/Jenkins-Worker/Dockerfile b/Docker/jenkins/Jenkins-Worker/Dockerfile
index 0ad941def..fec6b3203 100644
--- a/Docker/jenkins/Jenkins-Worker/Dockerfile
+++ b/Docker/jenkins/Jenkins-Worker/Dockerfile
@@ -1,18 +1,14 @@
-FROM jenkins/jnlp-slave:4.13.3-1-jdk11
+FROM jenkins/inbound-agent:jdk21
USER root
ENV DEBIAN_FRONTEND=noninteractive
-# install python and pip and aws cli
-RUN set -xe && apt-get update && apt-get install -y apt-utils dnsutils python python-setuptools python-dev python-pip python3 python3-pip build-essential libgit2-dev zip unzip less vim gettext-base wget
-RUN set -xe && python -m pip install awscli --upgrade && python -m pip install pytest --upgrade && python -m pip install PyYAML --upgrade && python -m pip install lxml --upgrade
-RUN set -xe && python3 -m pip install pytest --upgrade && python3 -m pip install PyYAML --upgrade
-RUN set -xe && python -m pip install yq --upgrade && python3 -m pip install yq --upgrade
-RUN set -xe && python3 -m pip install pandas --upgrade
+RUN set -xe && apt-get update && apt-get install -y apt-utils dnsutils build-essential zip unzip jq less vim gettext-base
RUN apt-get update \
&& apt-get install -y lsb-release \
+ git \
apt-transport-https \
r-base \
libffi-dev \
@@ -35,22 +31,30 @@ RUN apt-get update \
lua5.3 \
software-properties-common \
sudo \
+ wget \
&& ln -s /usr/bin/lua5.3 /usr/local/bin/lua
# install Ruby.
RUN apt-get install -y ruby-full
-# install GIT from buster-backports
-RUN echo "deb http://deb.debian.org/debian buster-backports main" > /etc/apt/sources.list.d/buster-backports.list \
- && apt-get update \
- && apt-get -t=buster-backports -y install git=1:2.30.*
+#
+# install docker tools:
+#
+RUN sudo install -m 0755 -d /etc/apt/keyrings \
+ && curl -fsSL https://download.docker.com/linux/debian/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg \
+ && sudo chmod a+r /etc/apt/keyrings/docker.gpg \
+ && echo \
+ "deb [arch="$(dpkg --print-architecture)" signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian \
+ "$(. /etc/os-release && echo "$VERSION_CODENAME")" stable" | \
+ sudo tee /etc/apt/sources.list.d/docker.list > /dev/null \
+ && apt-get update && apt-get install -y docker-ce
# install k6 to run load tests
RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys C5AD17C747E3415A3642D57D77C6C491D6AC1D69 \
&& echo "deb https://dl.k6.io/deb stable main" | tee /etc/apt/sources.list.d/k6.list \
&& apt-get update \
&& apt-get install k6
-
+
# install xk6-browser
RUN cd /opt && wget --quiet https://github.com/grafana/xk6-browser/releases/download/v0.3.0/xk6-browser-v0.3.0-linux-amd64.tar.gz \
&& tar -xvzf /opt/xk6-browser-v0.3.0-linux-amd64.tar.gz
@@ -71,15 +75,13 @@ RUN wget https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64 && c
#
# install docker tools:
-# * https://docs.docker.com/install/linux/docker-ce/debian/#install-docker-ce-1
-# * https://docs.docker.com/compose/install/#install-compose
#
RUN curl -fsSL https://download.docker.com/linux/debian/gpg | apt-key add - \
&& /usr/bin/add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/debian \
$(lsb_release -c -s) \
stable" \
&& apt-get update \
- && apt-get install -y docker-ce \
+ && apt-get install -y docker-ce-cli \
&& curl -L "https://github.com/docker/compose/releases/download/1.23.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose \
&& chmod a+rx /usr/local/bin/docker-compose
@@ -111,25 +113,26 @@ RUN DISTRO="$(lsb_release -c -s)" \
&& rm -rf /var/lib/apt/lists/*
# Copy sh script responsible for installing Python
-COPY install-python3.8.sh /root/tmp/install-python3.8.sh
+COPY install-python3.9.sh /root/tmp/install-python3.9.sh
-# Run the script responsible for installing Python 3.8.0 and link it to /usr/bin/python
-RUN chmod +x /root/tmp/install-python3.8.sh; sync && \
- bash /root/tmp/install-python3.8.sh && \
- rm -rf /root/tmp/install-python3.8.sh && \
+# Run the script responsible for installing Python 3.9.0 and link it to /usr/bin/python
+RUN chmod +x /root/tmp/install-python3.9.sh; sync && \
+ bash /root/tmp/install-python3.9.sh && \
+ rm -rf /root/tmp/install-python3.9.sh && \
unlink /usr/bin/python3 && \
- ln -s /usr/local/bin/python3.8 /usr/bin/python3
+ ln -s /usr/local/bin/python3.9 /usr/bin/python && \
+ ln -s /usr/local/bin/python3.9 /usr/bin/python3
RUN env
RUN which python
-RUN which python3.8
+RUN which python3.9
# Fix shebang for lsb_release
-RUN sed -i 's/python3/python3.7/' /usr/bin/lsb_release && \
- sed -i 's/python3/python3.7/' /usr/bin/add-apt-repository
+RUN sed -i 's/python3/python3.9/' /usr/bin/lsb_release && \
+ sed -i 's/python3/python3.9/' /usr/bin/add-apt-repository
# install aws cli, poetry, pytest, etc.
-RUN set -xe && python3.8 -m pip install --upgrade pip && python3.8 -m pip install awscli --upgrade && python3.8 -m pip install pytest --upgrade && python3.8 -m pip install poetry && python3.8 -m pip install PyYAML --upgrade && python3.8 -m pip install lxml --upgrade && python3.8 -m pip install yq --upgrade
+RUN set -xe && python3.9 -m pip install --upgrade pip && python3.9 -m pip install awscli --upgrade && python3.9 -m pip install pytest --upgrade && python3.9 -m pip install poetry && python3.9 -m pip install PyYAML --upgrade && python3.9 -m pip install lxml --upgrade && python3.9 -m pip install yq --upgrade
# update /etc/sudoers
RUN sed 's/^%sudo/#%sudo/' /etc/sudoers > /etc/sudoers.bak \
diff --git a/Docker/jenkins/Jenkins-Worker/install-python3.8.sh b/Docker/jenkins/Jenkins-Worker/install-python3.8.sh
deleted file mode 100755
index a01d59420..000000000
--- a/Docker/jenkins/Jenkins-Worker/install-python3.8.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/bash
-wget https://www.python.org/ftp/python/3.8.0/Python-3.8.0.tar.xz
-tar xf Python-3.8.0.tar.xz
-rm Python-3.8.0.tar.xz
-cd Python-3.8.0
-./configure
-make
-make altinstall
diff --git a/Docker/jenkins/Jenkins-Worker/install-python3.9.sh b/Docker/jenkins/Jenkins-Worker/install-python3.9.sh
new file mode 100755
index 000000000..30ee05993
--- /dev/null
+++ b/Docker/jenkins/Jenkins-Worker/install-python3.9.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+wget https://www.python.org/ftp/python/3.9.0/Python-3.9.0.tar.xz
+tar xf Python-3.9.0.tar.xz
+rm Python-3.9.0.tar.xz
+cd Python-3.9.0
+./configure
+make
+make altinstall
diff --git a/Docker/jenkins/Jenkins/Dockerfile b/Docker/jenkins/Jenkins/Dockerfile
index a872ee1dd..04ebe5864 100644
--- a/Docker/jenkins/Jenkins/Dockerfile
+++ b/Docker/jenkins/Jenkins/Dockerfile
@@ -1,11 +1,10 @@
-FROM jenkins/jenkins:2.375
+FROM jenkins/jenkins:2.426.3-lts-jdk21
USER root
ENV DEBIAN_FRONTEND=noninteractive
-# install python
-RUN set -xe && apt-get update && apt-get install -y apt-utils dnsutils python python-setuptools python-dev python3 python3-pip python3-venv build-essential zip unzip jq less vim gettext-base wget
+RUN set -xe && apt-get update && apt-get install -y apt-utils dnsutils build-essential zip unzip jq less vim gettext-base
RUN set -xe && apt-get update \
&& apt-get install -y lsb-release \
@@ -30,6 +29,7 @@ RUN set -xe && apt-get update \
software-properties-common \
sudo \
tk-dev \
+ wget \
zlib1g-dev \
zsh \
&& ln -s /usr/bin/lua5.3 /usr/local/bin/lua
@@ -45,18 +45,15 @@ RUN export CLOUD_SDK_REPO="cloud-sdk-$(lsb_release -c -s)" \
#
# install docker tools:
-# * https://docs.docker.com/install/linux/docker-ce/debian/#install-docker-ce-1
-# * https://docs.docker.com/compose/install/#install-compose
#
-RUN curl -fsSL https://download.docker.com/linux/debian/gpg | apt-key add - \
- && add-apt-repository \
- "deb [arch=amd64] https://download.docker.com/linux/debian \
- $(lsb_release -cs) \
- stable" \
- && apt-get update \
- && apt-get install -y docker-ce \
- && curl -L "https://github.com/docker/compose/releases/download/1.23.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose \
- && chmod a+rx /usr/local/bin/docker-compose
+RUN sudo install -m 0755 -d /etc/apt/keyrings \
+ && curl -fsSL https://download.docker.com/linux/debian/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg \
+ && sudo chmod a+r /etc/apt/keyrings/docker.gpg \
+ && echo \
+ "deb [arch="$(dpkg --print-architecture)" signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian \
+ "$(. /etc/os-release && echo "$VERSION_CODENAME")" stable" | \
+ sudo tee /etc/apt/sources.list.d/docker.list > /dev/null \
+ && apt-get update && apt-get install -y docker-ce
# install nodejs
RUN curl -sL https://deb.nodesource.com/setup_18.x | bash -
@@ -81,8 +78,8 @@ RUN chmod +x /root/tmp/install-python3.8.sh; sync && \
ln -s /Python-3.8.0/python /usr/bin/python3
# Fix shebang for lsb_release
-RUN sed -i 's/python3/python3.5/' /usr/bin/lsb_release && \
- sed -i 's/python3/python3.5/' /usr/bin/add-apt-repository
+RUN sed -i 's/python3/python3.8/' /usr/bin/lsb_release && \
+ sed -i 's/python3/python3.8/' /usr/bin/add-apt-repository
# install aws cli, poetry, pytest, etc.
RUN set -xe && python3 -m pip install --upgrade pip && python3 -m pip install awscli --upgrade && python3 -m pip install pytest --upgrade && python3 -m pip install poetry && python3 -m pip install PyYAML --upgrade && python3 -m pip install lxml --upgrade && python3 -m pip install yq --upgrade
diff --git a/Docker/jenkins/Jenkins2/Dockerfile b/Docker/jenkins/Jenkins2/Dockerfile
index 59cb5672e..e6b73bc76 100644
--- a/Docker/jenkins/Jenkins2/Dockerfile
+++ b/Docker/jenkins/Jenkins2/Dockerfile
@@ -1,11 +1,10 @@
-FROM jenkins/jenkins:2.375
+FROM jenkins/jenkins:2.426.3-lts-jdk21
USER root
ENV DEBIAN_FRONTEND=noninteractive
-# install python
-RUN set -xe && apt-get update && apt-get install -y apt-utils dnsutils python python-setuptools python-dev python3 python3-pip python3-venv build-essential zip unzip jq less vim gettext-base wget
+RUN set -xe && apt-get update && apt-get install -y apt-utils dnsutils build-essential zip unzip jq less vim gettext-base
RUN set -xe && apt-get update \
&& apt-get install -y lsb-release \
@@ -30,6 +29,7 @@ RUN set -xe && apt-get update \
software-properties-common \
sudo \
tk-dev \
+ wget \
zlib1g-dev \
zsh \
&& ln -s /usr/bin/lua5.3 /usr/local/bin/lua
@@ -45,18 +45,16 @@ RUN export CLOUD_SDK_REPO="cloud-sdk-$(lsb_release -c -s)" \
#
# install docker tools:
-# * https://docs.docker.com/install/linux/docker-ce/debian/#install-docker-ce-1
-# * https://docs.docker.com/compose/install/#install-compose
#
-RUN curl -fsSL https://download.docker.com/linux/debian/gpg | apt-key add - \
- && add-apt-repository \
- "deb [arch=amd64] https://download.docker.com/linux/debian \
- $(lsb_release -cs) \
- stable" \
- && apt-get update \
- && apt-get install -y docker-ce \
- && curl -L "https://github.com/docker/compose/releases/download/1.23.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose \
- && chmod a+rx /usr/local/bin/docker-compose
+RUN sudo install -m 0755 -d /etc/apt/keyrings \
+ && curl -fsSL https://download.docker.com/linux/debian/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg \
+ && sudo chmod a+r /etc/apt/keyrings/docker.gpg \
+ && echo \
+ "deb [arch="$(dpkg --print-architecture)" signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian \
+ "$(. /etc/os-release && echo "$VERSION_CODENAME")" stable" | \
+ sudo tee /etc/apt/sources.list.d/docker.list > /dev/null \
+ && apt-get update && apt-get install -y docker-ce
+
# install nodejs
RUN curl -sL https://deb.nodesource.com/setup_18.x | bash -
diff --git a/Docker/nginx-prometheus-exporter-wrapper/Dockerfile b/Docker/nginx-prometheus-exporter-wrapper/Dockerfile
index 5134ce440..9b883b0ab 100644
--- a/Docker/nginx-prometheus-exporter-wrapper/Dockerfile
+++ b/Docker/nginx-prometheus-exporter-wrapper/Dockerfile
@@ -1,4 +1,4 @@
-FROM golang:1.14-alpine as build-deps
+FROM golang:1.21.8-alpine as build-deps
RUN apk update && apk add --no-cache git gcc curl bash
diff --git a/Docker/python-nginx/python2.7-alpine3.7/Dockerfile b/Docker/python-nginx/python2.7-alpine3.7/Dockerfile
index 651bc1e7e..c4a934df5 100644
--- a/Docker/python-nginx/python2.7-alpine3.7/Dockerfile
+++ b/Docker/python-nginx/python2.7-alpine3.7/Dockerfile
@@ -1,6 +1,6 @@
# python2.7 microservice base image
-FROM alpine:3.7
+FROM alpine:3.16.9
ENV DEBIAN_FRONTEND=noninteractive
diff --git a/Docker/python-nginx/python3.9-buster/uwsgi.conf b/Docker/python-nginx/python3.9-buster/uwsgi.conf
index 97c53335d..7bafdb48d 100644
--- a/Docker/python-nginx/python3.9-buster/uwsgi.conf
+++ b/Docker/python-nginx/python3.9-buster/uwsgi.conf
@@ -15,7 +15,19 @@ server {
server {
listen 80;
- large_client_header_buffers 4 64k;
+ proxy_buffer_size 16k;
+ proxy_buffers 4 16k;
+ proxy_busy_buffers_size 32k;
+
+ uwsgi_buffer_size 16k;
+ uwsgi_buffers 4 16k;
+ uwsgi_busy_buffers_size 32k;
+
+ client_header_buffer_size 32k;
+ large_client_header_buffers 4 16k;
+
+ proxy_buffering off;
+ uwsgi_buffering off;
location / {
uwsgi_param REMOTE_ADDR $http_x_forwarded_for if_not_empty;
diff --git a/Docker/sidecar/Dockerfile b/Docker/sidecar/Dockerfile
index ad784ba55..5e07ceaf4 100644
--- a/Docker/sidecar/Dockerfile
+++ b/Docker/sidecar/Dockerfile
@@ -1,4 +1,4 @@
-FROM nginx:1.15.6-alpine
+FROM nginx:1-alpine
COPY nginx.conf /etc/nginx/nginx.conf
COPY uwsgi.conf.template /etc/nginx/gen3.conf.d/uwsgi.conf.template
diff --git a/Jenkinsfile b/Jenkinsfile
index 365f1ca24..908c2d01a 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -43,9 +43,52 @@ metadata:
app: ephemeral-ci-run
netnolimit: "yes"
annotations:
+ karpenter.sh/do-not-evict: true
"cluster-autoscaler.kubernetes.io/safe-to-evict": "false"
spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
+ - matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ initContainers:
+ - name: wait-for-jenkins-connection
+ image: quay.io/cdis/gen3-ci-worker:master
+ command: ["/bin/sh","-c"]
+ args: ["while [ $(curl -sw '%{http_code}' http://jenkins-master-service:8080/tcpSlaveAgentListener/ -o /dev/null) -ne 200 ]; do sleep 5; echo 'Waiting for jenkins connection ...'; done"]
containers:
+ - name: jnlp
+ command: ["/bin/sh","-c"]
+ args: ["sleep 30; /usr/local/bin/jenkins-agent"]
+ resources:
+ requests:
+ cpu: 500m
+ memory: 500Mi
+ ephemeral-storage: 500Mi
+ - name: selenium
+ image: selenium/standalone-chrome:112.0
+ imagePullPolicy: Always
+ ports:
+ - containerPort: 4444
+ readinessProbe:
+ httpGet:
+ path: /status
+ port: 4444
+ timeoutSeconds: 60
+ resources:
+ requests:
+ cpu: 500m
+ memory: 500Mi
+ ephemeral-storage: 500Mi
- name: shell
image: quay.io/cdis/gen3-ci-worker:master
imagePullPolicy: Always
@@ -53,6 +96,11 @@ spec:
- sleep
args:
- infinity
+ resources:
+ requests:
+ cpu: 0.2
+ memory: 400Mi
+ ephemeral-storage: 1Gi
env:
- name: AWS_DEFAULT_REGION
value: us-east-1
@@ -86,8 +134,8 @@ spec:
readOnly: true
mountPath: "/usr/local/share/ca-certificates/cdis/cdis-ca.crt"
subPath: "ca.pem"
- - name: dockersock
- mountPath: "/var/run/docker.sock"
+ - name: containerdsock
+ mountPath: "/var/run/containerd/containerd.sock"
serviceAccount: jenkins-service
serviceAccountName: jenkins-service
volumes:
@@ -97,9 +145,9 @@ spec:
- name: ca-volume
secret:
secretName: "service-ca"
- - name: dockersock
+ - name: containerdsock
hostPath:
- path: /var/run/docker.sock
+ path: /var/run/containerd/containerd.sock
'''
defaultContainer 'shell'
}
@@ -245,8 +293,8 @@ spec:
script {
try {
if(!skipUnitTests) {
- sh '/usr/bin/pip3 install boto3 --upgrade --user'
- sh '/usr/bin/pip3 install kubernetes --upgrade --user'
+ sh '/usr/local/bin/pip3 install boto3 --upgrade --user'
+ sh '/usr/local/bin/pip3 install kubernetes --upgrade --user'
sh 'python3 -m pytest cloud-automation/apis_configs/'
sh 'python3 -m pytest cloud-automation/gen3/lib/dcf/'
sh 'cd cloud-automation/tf_files/aws/modules/common-logging && python3 -m pytest testLambda.py'
diff --git a/apis_configs/logo.svg b/apis_configs/logo.svg
index 7f056e548..da71f111e 100644
--- a/apis_configs/logo.svg
+++ b/apis_configs/logo.svg
@@ -1 +1,98 @@
-
\ No newline at end of file
+
+
diff --git a/doc/karpenter.md b/doc/karpenter.md
new file mode 100644
index 000000000..29aa35de1
--- /dev/null
+++ b/doc/karpenter.md
@@ -0,0 +1,59 @@
+# Introduction
+
+Karpenter is a modern cloud-native tool for Kubernetes cluster management and resource allocation. With its efficient and customizable scaling and orchestration capabilities, Karpenter is becoming an increasingly popular alternative to Cluster Autoscaler. In this document, we will discuss the benefits of using Karpenter over Cluster Autoscaler and why it is worth considering a switch.
+
+# Table of contents
+
+- [1. Benefits of Karpenter](#benefits-of-karpenter)
+- [2. Requirements](#requirements)
+- [3. How it Works](#how-it-works)
+- [4. Installation Steps](#installation-steps)
+- [5. Modifying the Provisioners and Awsnodetemplates](#modifying-the-provisioners-and-awsnodetemplates)
+- [6. Potential Issues](#potential-issues)
+
+## Benefits of Karpenter
+
+- Advanced Resource Allocation: Karpenter provides fine-tuned control over resource allocation, allowing for greater optimization of resource utilization. With its advanced features, Karpenter can ensure that nodes are appropriately sized and allocated, reducing the chance of overprovisioning or underutilization.
+- Scalability: Karpenter offers powerful scaling capabilities, allowing administrators to quickly and efficiently adjust the size of their cluster as needed. With its sophisticated scaling algorithms, Karpenter ensures that resources are optimized and that clusters are able to grow and shrink as needed.
+- Customizable: Karpenter allows administrators to customize and configure their cluster as needed. With its flexible and intuitive interface, administrators can easily adjust the size and composition of their cluster to meet the specific needs of their organization.
+- Efficient Management: Karpenter provides efficient and streamlined cluster management, allowing administrators to manage their resources more effectively. With its intuitive and powerful interface, administrators can easily allocate resources and monitor cluster performance, ensuring that their cluster is running smoothly and efficiently.
+
+## Requirements
+
+Karpenter requires access to AWS to be able to provision EC2 instances. It uses an EKS IAM service account with access to most EC2 resources. Once Karpenter is deployed, it also requires configuration to decide which node types to spin up, described in the next section. Our base configuration relies on resources provisioned by our terraform, so installing it without our terraform may require some manual effort. Lastly, since Karpenter becomes the new cluster management system, the cluster autoscaler needs to be uninstalled.
+
+## How it Works
+
+Karpenter works at the EKS level instead of the cloud level: the systems that decide which nodes to spin up move from AWS configuration into EKS configuration. Karpenter uses provisioners to replace autoscaling groups and awsnodetemplates to replace launch configs/templates. Once Karpenter is deployed, you need to create at least one provisioner and one awsnodetemplate so that it knows which nodes it may spin up. When pending pods require new nodes, Karpenter picks the most efficient instance type to use based on the pod resource requests and the instance types allowed by your provisioners/templates.
+
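+For illustration, a minimal provisioner might look like the sketch below. This assumes the `karpenter.sh/v1alpha5` API targeted by the `kubectl edit provisioners.karpenter.sh` command shown later in this document; the provisioners installed by cloud-automation are the source of truth, and the values here are only examples.
+
+```yaml
+apiVersion: karpenter.sh/v1alpha5
+kind: Provisioner
+metadata:
+  name: default
+spec:
+  # only allow on-demand, amd64 capacity in this example
+  requirements:
+    - key: karpenter.sh/capacity-type
+      operator: In
+      values: ["on-demand"]
+    - key: kubernetes.io/arch
+      operator: In
+      values: ["amd64"]
+  # cap the total resources this provisioner may create
+  limits:
+    resources:
+      cpu: "1000"
+  # AWS-specific settings (AMI, subnets, security groups) live in the awsnodetemplate
+  providerRef:
+    name: default
+  # remove empty nodes after 30 seconds
+  ttlSecondsAfterEmpty: 30
+```
+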
+## Installation Steps
+
+To install Karpenter using gen3, run the kube-setup-karpenter script. The script does the following to install Karpenter:
+
+1. Creates a new karpenter namespace for the karpenter deployment to run in.
+2. Creates an EKS IAM service account with access to EC2 resources within AWS for the Karpenter deployment to use.
+3. Tags the relevant subnets and security groups for the Karpenter deployment to autodiscover.
+4. Installs the Karpenter Helm deployment.
+5. Installs the necessary provisioners and AWS node templates.
+
+Karpenter can also be installed through the manifest by adding a .global.karpenter block to your manifest.json (a minimal sketch is shown below). If this block is set to "arm", the arm provisioner is also installed, which provisions ARM-based nodes for the default worker nodes.
+
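+For reference, the manifest block might look like this minimal sketch (only the `karpenter` key matters here; the rest of your manifest.json is unchanged):
+
+```json
+{
+  "global": {
+    "karpenter": "arm"
+  }
+}
+```
+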
+## Modifying the Provisioners and Awsnodetemplates
+
+If you ever need to change the behavior of the provisioners on the fly you can run the following command
+
+```bash
+kubectl edit provisioners.karpenter.sh
+```
+
+If you ever need to edit the awsnodetemplate you can do so with
+
+```bash
+kubectl edit awsnodetemplates.karpenter.k8s.aws
+```
+
+Base configuration lives in the [karpenter configuration section](https://github.com/uc-cdis/cloud-automation/tree/master/kube/services/karpenter) of cloud-automation, so you can edit that configuration for longer-term or more widespread changes.
+
+## Potential Issues
+
+Karpenter is a powerful, flexible tool, but with that flexibility come some challenges. First, Karpenter needs to be able to find the subnets/security groups for your specific VPC. If there are multiple VPCs in an AWS account and multiple Karpenter deployments, we need to stray from the official Karpenter documentation when tagging subnets/security groups. Karpenter discovers subnets/security groups by tag, so instead of setting the discovery tag's value to "true" we set it to the VPC name, and similarly set the VPC name within the Karpenter configuration. Also, Karpenter requires at least 2 nodes, outside of any nodes it manages, for its own deployment to run on; this keeps Karpenter available so it can schedule nodes without taking itself out. Because of this, we recommend running a regular EKS worker ASG with min/max/desired of 2 for Karpenter to run on. If these nodes ever need to be updated, make sure Karpenter comes back up afterwards so the cluster keeps scaling as intended.
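+
+To illustrate the tagging approach described above, an awsnodetemplate can select subnets and security groups by a discovery tag whose value is the VPC name. This is a minimal sketch assuming the `karpenter.k8s.aws/v1alpha1` API; the tag key and values are examples, and the templates in the karpenter configuration linked above are the source of truth.
+
+```yaml
+apiVersion: karpenter.k8s.aws/v1alpha1
+kind: AWSNodeTemplate
+metadata:
+  name: default
+spec:
+  # match only the resources tagged with this VPC's name, not a generic "true",
+  # so multiple VPCs/Karpenter deployments can coexist in one AWS account
+  subnetSelector:
+    karpenter.sh/discovery: my-vpc-name
+  securityGroupSelector:
+    karpenter.sh/discovery: my-vpc-name
+```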
diff --git a/doc/kube-setup-revproxy.md b/doc/kube-setup-revproxy.md
index 5c483e12f..fdf0b0db5 100644
--- a/doc/kube-setup-revproxy.md
+++ b/doc/kube-setup-revproxy.md
@@ -5,7 +5,8 @@ Configure and launch the reverse proxy.
## References
* the reverse proxy [readme](../kube/services/revproxy/README.md) has more details.
-* WAF - the reverse proxy deploys the [modsecurity web application firewall](./waf.md). (This is only deployed if the "deploy_elb" flag is set to true in the manifest-global configmap (set/added via the global section of the manifest.json).deploy the revproxy-ELB-service and WAF)
+* WAF - the reverse proxy deploys the [modsecurity web application firewall](./waf.md).
+* IMPORTANT: The modsecurity WAF and Revproxy ELB service are only deployed if the "deploy_elb" flag is set to true in the manifest-global configmap. The Revproxy ELB is now deprecated; we suggest deploying an AWS ALB instead (see the kube-setup-ingress script).
* Please see https://github.com/uc-cdis/cloud-automation/blob/master/doc/kube-setup-ingress.md as AWS WAF and ALB is recommended.
* [maintenance mode](./maintenance.md)
* the [ip blacklist](../gen3/lib/manifestDefaults/revproxy/) may be configured with a custom `manifests/revproxy/blacklist.conf`
diff --git a/doc/s3-to-google-replication.md b/doc/s3-to-google-replication.md
new file mode 100644
index 000000000..82d0374c7
--- /dev/null
+++ b/doc/s3-to-google-replication.md
@@ -0,0 +1,68 @@
+# S3 to Google Cloud Storage Replication Pipeline
+
+This document will guide you through setting up a replication pipeline from AWS S3 to Google Cloud Storage (GCS) using VPC Service Controls and Storage Transfer Service. This approach follows security best practices so that data transfer between AWS S3 and GCS is secure and efficient.
+
+## Table of Contents
+
+- [Prerequisites](#prerequisites)
+- [Step-by-step Guide](#step-by-step-guide)
+ - [Setup VPC Service Controls](#setup-vpc-service-controls)
+ - [Initiate Storage Transfer Service](#initiate-storage-transfer-service)
+- [Compliance Benefits](#compliance-benefits)
+- [Cost Benefit Analysis](#cost-benefit-analysis)
+
+## Prerequisites
+
+1. **AWS account** with access to the S3 bucket.
+2. **Google Cloud account** with permissions to create buckets in GCS and set up VPC Service Controls and Storage Transfer Service.
+3. Familiarity with AWS IAM for S3 bucket access and Google Cloud IAM for GCS access.
+
+## Step-by-step Guide
+
+### Setup VPC Service Controls
+
+1. **Access the VPC Service Controls** in the Google Cloud Console.
+2. **Create a new VPC Service Control perimeter**.
+ - Name the perimeter and choose the desired region.
+   - Add the necessary GCP services. Be sure to include `storagetransfer.googleapis.com` for the Storage Transfer Service (a CLI sketch of the perimeter follows this list).
+3. **Setup VPC Service Control Policy** to allow connections from AWS.
+ - Use the [documentation](https://cloud.google.com/vpc-service-controls/docs/set-up) to help set up.
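+
+If you prefer the command line, the perimeter can also be sketched with `gcloud`. This is a minimal, illustrative example only; the policy ID, project number, and perimeter name are placeholders, and the flags should be verified against `gcloud access-context-manager perimeters create --help`.
+
+```bash
+# create a service perimeter around the project that owns the destination GCS bucket
+gcloud access-context-manager perimeters create s3_replication_perimeter \
+  --policy=ACCESS_POLICY_ID \
+  --title="S3 replication perimeter" \
+  --resources=projects/PROJECT_NUMBER \
+  --restricted-services=storage.googleapis.com,storagetransfer.googleapis.com
+```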
+
+### Initiate Storage Transfer Service
+
+1. Navigate to **Storage Transfer Service** in the Google Cloud Console.
+2. Click **Create Transfer Job**.
+3. **Select Source**: Choose Amazon S3 bucket and provide the necessary details.
+   - Make sure you have the necessary permissions for the S3 bucket in AWS IAM.
+4. **Select Destination**: Choose your GCS bucket.
+5. **Schedule & Advanced Settings**: Set the frequency and conditions for the transfer. Consider setting up notifications for job completion or errors.
+6. **Review & Create**: Confirm the details and initiate the transfer job (a CLI alternative is sketched below).
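+
+As an alternative to the console steps above, the transfer job can be sketched from the command line. This is a minimal, illustrative example only; it assumes the `gcloud transfer` component is installed, the bucket names and credentials file are placeholders, and the flags should be verified against `gcloud transfer jobs create --help`.
+
+```bash
+# aws-creds.json holds the AWS credentials (or role) used to read the source bucket
+gcloud transfer jobs create \
+  s3://my-aws-source-bucket gs://my-gcs-destination-bucket \
+  --source-creds-file=./aws-creds.json
+```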
+
+## Compliance Benefits
+
+Setting up a secure replication pipeline from AWS S3 to GCS using VPC Service Controls and Storage Transfer Service offers the following compliance benefits:
+
+1. **Data Security**: The VPC Service Controls provide an additional layer of security by ensuring that the transferred data remains within a defined security perimeter, reducing potential data leak risks.
+2. **Auditability**: Both AWS and GCS offer logging and monitoring tools that can provide audit trails for data transfer. This can help in meeting regulatory compliance requirements.
+3. **Consistent Data Replication**: The Storage Transfer Service ensures that data in GCS is up to date with the source S3 bucket, which is essential for consistent backup and disaster recovery strategies.
+
+## Cost Benefit Analysis
+
+**Benefits**:
+
+1. **Data Redundancy**: Having data stored in multiple cloud providers can be a part of a robust disaster recovery strategy.
+2. **Flexibility**: Replicating data to GCS provides flexibility in multi-cloud strategies, enabling seamless migrations or usage of GCP tools and services.
+3. **Security**: Utilizing VPC Service Controls strengthens the security posture.
+
+**Costs**:
+
+1. **Data Transfer Costs**: Both AWS and Google Cloud might charge for data transfer. It's crucial to analyze the cost, especially for large data transfers.
+2. **Storage Costs**: Storing data redundantly incurs additional storage costs in GCS.
+
+**Analysis**:
+
+To stay in compliance, we require multiple copies of our data in separate datacenters or clouds. After our security audit, we recognized the importance of not keeping data in a single cloud. Transferring data from AWS to GCP and storing it in two clouds simultaneously can be expensive, but when required, this solution is a straightforward way to achieve compliance.
+
+---
+
+Please note that while this guide is based on the provided Google Cloud documentation, it's crucial to refer to the original [documentation](https://cloud.google.com/architecture/transferring-data-from-amazon-s3-to-cloud-storage-using-vpc-service-controls-and-storage-transfer-service) for the most accurate and up-to-date information.
diff --git a/files/authorized_keys/squid_authorized_keys_user b/files/authorized_keys/squid_authorized_keys_user
index 46b43a030..4b35fecd9 100644
--- a/files/authorized_keys/squid_authorized_keys_user
+++ b/files/authorized_keys/squid_authorized_keys_user
@@ -18,4 +18,5 @@ ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKJR5N5VIU9qdSfCtlskzuQ7A5kNn8YPeXsoKq0HhY
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC3vyd6a7tsANi149ylPQYS8Gsp/SxJyhdK/j6arv77KbM0EIzzUiclFLnMKcqUQ263FrPyx3a3UP80R77ayCnwcEHrxlJrYfyFUva8vtmI9mu8VE7oXvuR/jcOyXM9NosxyYacL/p6W5X4r8tqo/gJFjmls1YRfu3JPlTgTT0VzGJu+B6rLEsw53c37VVzSaCtu/jBOjyxI1/UaNg1cd+hcfoQxJ9zSDqqE7ZUNOc3zHP+1AGYCQ/CJsNrDl2OkppIdC9He5jgjLhyD7yvyarI+oF05oHknol/K1hXK+yxIkF2Ou5krfjw7TMBvD+JbQVb35vL9acXFF20+lHLRLbobPU/6ZZTup3q7IRm5OWaL2CJtYZbJvicKW0Ep+vTzaiQjK71L6UxcIvnzvbP9Dnatv1GBMMDaQxAa4Lood8NG2ty1yfLN972akGqBlwJASXMRd/ogzxv2KSH9w6HHYoc2WpDhUtNHmjwX1FSLYPW3qx5ICMW6j9gR2u1tG4Ohzp1CmYVElnRHbnBrTkLde65Vqedk2tQy8fcopH59ZASIuR4GbhCb2SiNkr1PHEvfhLMzg/UCSnnhX9vUNhkPjJRy/bdL3pOt/77lpIQUqQBArOiZmfG8OD0q4+3Nr+c9v5bSSvynjirlKk+wb8sKyOoSAXdFeovL/A0BUKUjCtsXQ== dev@test.com
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQChK/8JjpUeWcF/1Ea2M4mSbLz1tOfpq74xD2USxE54kx7VoN1G7ylV76yqSIeRq1e7PPBEg5ZD1aXUJnlI32RwLJ5kaHnoB82Ta+Fv1B/vVoHCObcALfiHPpwPf1kM2liWEB0EhYcz1OUv3YQriPqjiRoWfnbw60GIyzhpWZhKRq0zlISOaTYdV9kafX+N7M6/gSU0632TgUwwsStYrffEleyrC/Lh+4UaESozWoPFiZLl2eMCKfZNFBB99HTFifImW2yC6Ag1QhCd1i3NpfiYuaSDH7WR3slPRSd8DiUAwGC2DkIuWPp3bhaAv2V4mtLIBAaTZsINIACB2+w7yf9yvCGtdobCmp4AA7ik9rEkRLk/Jff0YBHd6Z4qyIuRht3ZeWXIYSK1zOlPfs4lPUgvbjlPgMVFV2CrvOTnS+YZdW+8AklwRC3HDPD8wv3H/eGxl3K0vHWTBbTb774nVNfRDw81wcezCXFNUn4p2he7fgKcxs/rnMsYUcY8JJNR7Iz+NNIGUCom6HFwCMQdangFMHUW5TxxrlJcwVRaAns1M6g3ilYO+uvN/XsgCpZWYWnv5rBk8qz6dBM7gpc8tSr6Hvr7/vlghF3jpL+mQiW+7vUL+UZrUFNyoacUcQ+NuxKacHtHQKuRDyWofp+CB2b2a744F3mpkxx74HIkiZ72mQ== dev@test.com
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDDTX+pQvGrQVXmHGDhBP+632tgbb1j+BQWkrsUkDJGzwFiGs4dgqDs2eC+aDVq2LFz4xj0SgussFAKciB45OgmSZKX5yUE3Oo/lqov0Bb5f85iBHGv/X/JiuIYaq8GJklVyyo1sfKLUK1SOal6bE1WofezyTyDsdrHjIU50quzW7nB1CmL6rekIv/+df/seut4b3De1d2uX5WGGtcvQ5yTSgBW5aabMAJ2V9WlP/6Dw040Kq0MyKV01cIJ1HAjFhP58gbf3Eytz3AqqJVT6u0QroxhesCgKTyGcAyYy3airI/N0FHdC5oABVEJ6dKyy1rYvOchuxYeVMVVWn0vS7mZ+vP7dqaDmgEUU2qmTPBQZV2xBWCdpfyUYYARW2JzlEaySbmA+yoxFBsquunVbIgUGNEUbxefsFdM3k5pS6I1uuEM0ATYH5iNz84nKKCcksGlib0i/pEtra6N/mFF7yjHYBRb/E/VCZig0gKezDJWu/DO0emJA+kdQpqp48U+qFrSWkuiO0dCQYl3VCVo8vedgMGPjr8MbUjU7o8W1+DYyjFM8HYMknRNdVAqAoK+cedw9mAWVGpKFrl61caGTFck0634nAVFUmfGTh9XRaZeFdDnivxnqP837gcsdKnEGYnkrxWap97XeXzK0P0Svy1zBfUQyzU5vrHfHt2H7ILDMw== prodv1-usersync-sftp
-ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDaO/doqHANcTZFEqZOoy9kKgbxu0d/cS1nEINlFcoQ/jnCG7huznWnWiYgnlkS6/Op9VrDp6qG/UBDye2mTvAh2FHPsOzSGvgml3dPYB5fy6G/xoXd7NJnIxttwFUvk4GuLZ40s24WCcXoFGJ2vaSAVYr0q6lmqOqk6jp1/lNj4+QFD4mcH2//jTscSFNseRII2NECu+PnnWAuYFOIHH1IODOvInEivUvN6VBX410D7iD7cEdhgiYitFZH6Cp6ubWG7OUKdZYv0067eO6HDDzl7y+BBUf3DF6Lr8gqtGXVqmAB9UqeBJ8pP3pNWKbgAa8sHvS8JxElCIc+4EM5dTI2OrDYKiuCTPZEC14WEFZLKqH7tjQFuZe0jfVRtoFNmKWClCgkJDWpyIkdR+qHcnOwlYkUVN3B02WVu4kTfox2ZUz65tLspJNAxAjYVrI7+c6LTQHSJwMcAMYcehR3vuqAfKE7xM6ReNxRQXsWaasdJgT2IJKj7vHu/G9GVycjiheg3zakJ9rr+63I68XlHNnTtfjIl/jgIHgcU18ggbwkwjL3xk39YttutlAaNAGUYCsopn/HdK8A86KvTCwHGEKtubgEHmv1oRAOooVaNes1oko2y9Saaqee52bsvwfeTLgxXB43d9GOWLoyBlgprDiufssFHoiJKQlgrqEwtg+vYQ== giangbui0816@gmail.com
\ No newline at end of file
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDaO/doqHANcTZFEqZOoy9kKgbxu0d/cS1nEINlFcoQ/jnCG7huznWnWiYgnlkS6/Op9VrDp6qG/UBDye2mTvAh2FHPsOzSGvgml3dPYB5fy6G/xoXd7NJnIxttwFUvk4GuLZ40s24WCcXoFGJ2vaSAVYr0q6lmqOqk6jp1/lNj4+QFD4mcH2//jTscSFNseRII2NECu+PnnWAuYFOIHH1IODOvInEivUvN6VBX410D7iD7cEdhgiYitFZH6Cp6ubWG7OUKdZYv0067eO6HDDzl7y+BBUf3DF6Lr8gqtGXVqmAB9UqeBJ8pP3pNWKbgAa8sHvS8JxElCIc+4EM5dTI2OrDYKiuCTPZEC14WEFZLKqH7tjQFuZe0jfVRtoFNmKWClCgkJDWpyIkdR+qHcnOwlYkUVN3B02WVu4kTfox2ZUz65tLspJNAxAjYVrI7+c6LTQHSJwMcAMYcehR3vuqAfKE7xM6ReNxRQXsWaasdJgT2IJKj7vHu/G9GVycjiheg3zakJ9rr+63I68XlHNnTtfjIl/jgIHgcU18ggbwkwjL3xk39YttutlAaNAGUYCsopn/HdK8A86KvTCwHGEKtubgEHmv1oRAOooVaNes1oko2y9Saaqee52bsvwfeTLgxXB43d9GOWLoyBlgprDiufssFHoiJKQlgrqEwtg+vYQ== giangbui0816@gmail.com
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDTpJ2l8nfOvhJ4Y3pjadFU69nfJBRuE0BaHE22LK9qflFWdhGW+T/x8Qy9406DFXCh6KED+q9lC+N4nR92AfgFNaBmkXZkzWLoXcqO1IWRexHwTqAUcrtLjpx5wNdCr3+vv9hWhXtvYg8ewnrZc+WxYde4EUmXbhzPXbg0SkBXTr6bpYhs6inyttfBeJNxbeydrW7cmhFiAdOkm03o3AXdH86PNlWVfVHy8OHHzf4fbvlJlOx7OeB+wOyQUr3DW+IWBLQFJk4uyagn/ECV9OIQpxoJFTQjcSrJ6v/GqlY5PImM6YxL8NlZu46CDIxagaZkum+iJ8dtPYr6tJuLiP5Ny0Gsl1X5DoKlstgyqqPNYTnZVS4GSS5Hyxm6HmodZ78OR5+vAoyWKZ3unXU5Dbkz0Qxq9VtrGo2xd0M+dDi/7YazRpLL0tc39w48Wl7KD3jFzoesZp1JHeEGLdGXlGCw8AM1FT0WDf28ShTRds6uWPGvMtM3XkVDPMLFwroKv1RCErmqLYod4HOMuwlmdRvtDGYb3NYsliOnHPiT9nhu2J6KmT1jj8uFOLyTaJCArtBqIsXscP3R4o0wBlQl3FniMdiK7ESkv8DUaOr1Co+/3wX9n/p/BW5bxuq1R9HpNyKsrALyNJUkquVT+5aPcNKXvmAeHAw/D0TYzy6ZKBpnDw== kyuleburton@Kyules-MacBook-Pro.local
diff --git a/files/dashboard/maintenance-page/index.html b/files/dashboard/maintenance-page/index.html
index a3e34479b..fac49e64e 100644
--- a/files/dashboard/maintenance-page/index.html
+++ b/files/dashboard/maintenance-page/index.html
@@ -16,7 +16,7 @@
@@ -27,12 +27,12 @@ This site is under maintenance...
Please check back later.
-
+
diff --git a/files/dashboard/usage-reports/package-lock.json b/files/dashboard/usage-reports/package-lock.json
index 4841621b6..24e3de518 100644
--- a/files/dashboard/usage-reports/package-lock.json
+++ b/files/dashboard/usage-reports/package-lock.json
@@ -5,14 +5,14 @@
"requires": true,
"dependencies": {
"jasmine-core": {
- "version": "3.6.0",
- "resolved": "https://registry.npmjs.org/jasmine-core/-/jasmine-core-3.6.0.tgz",
- "integrity": "sha512-8uQYa7zJN8hq9z+g8z1bqCfdC8eoDAeVnM5sfqs7KHv9/ifoJ500m018fpFc7RDaO6SWCLCXwo/wPSNcdYTgcw=="
+ "version": "3.99.1",
+ "resolved": "https://registry.npmjs.org/jasmine-core/-/jasmine-core-3.99.1.tgz",
+ "integrity": "sha512-Hu1dmuoGcZ7AfyynN3LsfruwMbxMALMka+YtZeGoLuDEySVmVAPaonkNoBRIw/ectu8b9tVQCJNgp4a4knp+tg=="
},
"lit-html": {
- "version": "1.3.0",
- "resolved": "https://registry.npmjs.org/lit-html/-/lit-html-1.3.0.tgz",
- "integrity": "sha512-0Q1bwmaFH9O14vycPHw8C/IeHMk/uSDldVLIefu/kfbTBGIc44KGH6A8p1bDfxUfHdc8q6Ct7kQklWoHgr4t1Q=="
+ "version": "1.4.1",
+ "resolved": "https://registry.npmjs.org/lit-html/-/lit-html-1.4.1.tgz",
+ "integrity": "sha512-B9btcSgPYb1q4oSOb/PrOT6Z/H+r6xuNzfH4lFli/AWhYwdtrgQkQWBbIc6mdnf6E2IL3gDXdkkqNktpU0OZQA=="
}
}
}
diff --git a/files/dashboard/usage-reports/package.json b/files/dashboard/usage-reports/package.json
index 6552248ae..7f66661ce 100644
--- a/files/dashboard/usage-reports/package.json
+++ b/files/dashboard/usage-reports/package.json
@@ -10,7 +10,7 @@
"author": "",
"license": "ISC",
"dependencies": {
- "jasmine-core": "^3.6.0",
- "lit-html": "^1.3.0"
+ "jasmine-core": "^3.99.1",
+ "lit-html": "^1.4.1"
}
}
diff --git a/files/openvpn_management_scripts/create_ovpn.sh b/files/openvpn_management_scripts/create_ovpn.sh
index 4e6ba7bf5..4d351464b 100755
--- a/files/openvpn_management_scripts/create_ovpn.sh
+++ b/files/openvpn_management_scripts/create_ovpn.sh
@@ -29,8 +29,8 @@ set -e
set -u
-USER_CERT_PATH="$KEY_PATH/$1.crt"
-USER_KEY_PATH="$KEY_PATH/$1.key"
+USER_CERT_PATH="$KEY_PATH/issued/$1.crt"
+USER_KEY_PATH="$KEY_PATH/private/$1.key"
#HEADER
diff --git a/files/openvpn_management_scripts/create_seperated_vpn_zip.sh b/files/openvpn_management_scripts/create_seperated_vpn_zip.sh
index 1794a3b69..c7ac6ce3a 100755
--- a/files/openvpn_management_scripts/create_seperated_vpn_zip.sh
+++ b/files/openvpn_management_scripts/create_seperated_vpn_zip.sh
@@ -30,8 +30,8 @@ username=${username// /_}
# now, clean out anything that's not alphanumeric or an underscore
username=${username//[^a-zA-Z0-9_-.]/}
-USER_CERT_PATH="$KEY_PATH/$1.crt"
-USER_KEY_PATH="$KEY_PATH/$1.key"
+USER_CERT_PATH="$KEY_PATH/issued/$1.crt"
+USER_KEY_PATH="$KEY_PATH/private/$1.key"
#make a temp dir
TEMP_NAME="$username-$CLOUD_NAME-seperated"
@@ -47,6 +47,7 @@ cp $USER_KEY_PATH $TEMP_DIR/client.key
#This is because EXTHOST is a defined variable in the template
while read r; do eval echo $r; done < $TEMPLATE_DIR/client_ovpn_seperate.settings >> $TEMP_DIR/${username}-${CLOUD_NAME}.ovpn
+mkdir -p $KEY_DIR/ovpn_files_seperated
tar -C $TEMP_DIR/../ -zcvf $KEY_DIR/ovpn_files_seperated/${username}-${CLOUD_NAME}-seperated.tgz $TEMP_NAME
echo -e "Exiting ${BOLD}$_${CLEAR}"
diff --git a/files/openvpn_management_scripts/create_vpn_user.sh b/files/openvpn_management_scripts/create_vpn_user.sh
index 2f3ef406b..39be17fcb 100755
--- a/files/openvpn_management_scripts/create_vpn_user.sh
+++ b/files/openvpn_management_scripts/create_vpn_user.sh
@@ -49,13 +49,16 @@ export KEY_EMAIL=$email
export KEY_ALTNAMES="DNS:${KEY_CN}"
#This create the key's for the road warrior
-echo -e "running ${YELLOW} build-batch-key"
-build-key-batch $username &>/dev/null && echo -e "${GREEN}success!" || echo -e "${RED}failure";echo -e $CLEAR
+echo -e "running ${YELLOW} easyrsa build-client-full"
+(
+ cd $EASYRSA_PATH
+ easyrsa build-client-full $username nopass &>/dev/null && echo -e "${GREEN}success!" || echo -e "${RED}failure";echo -e $CLEAR
+)
#&& echo -e "${GREEN}success!" || echo -e "${RED}failure";echo -e $CLEAR
-echo "Backup certs so we can revoke them if ever needed"
-[ -d $KEY_DIR/user_certs/ ] || mkdir $KEY_DIR/user_certs/
-cp $KEY_DIR/$username.crt $KEY_DIR/user_certs/$username.crt-$(date +%F-%T) && echo -e "${GREEN}success!" || echo -e "${RED}failure";echo -e $CLEAR
+# echo "Backup certs so we can revoke them if ever needed"
+# [ -d $KEY_DIR/user_certs/ ] || mkdir $KEY_DIR/user_certs/
+# cp $KEY_DIR/$username.crt $KEY_DIR/user_certs/$username.crt-$(date +%F-%T) && echo -e "${GREEN}success!" || echo -e "${RED}failure";echo -e $CLEAR
echo "Create the OVPN file for $username"
$VPN_BIN_ROOT/create_ovpn.sh $KEY_CN $KEY_EMAIL > $KEY_DIR/ovpn_files/${username}-${CLOUD_NAME}.ovpn 2> /dev/null && echo -e "${GREEN}success!" || echo -e "${RED}failure";echo -e $CLEAR
diff --git a/files/openvpn_management_scripts/install_ovpn.sh b/files/openvpn_management_scripts/install_ovpn.sh
index 795ac17f2..4250d2ca2 100644
--- a/files/openvpn_management_scripts/install_ovpn.sh
+++ b/files/openvpn_management_scripts/install_ovpn.sh
@@ -12,13 +12,13 @@ VARS_PATH="$EASYRSA_PATH/vars"
#EASY-RSA Vars
- KEY_SIZE=4096
- COUNTRY="US"
- STATE="IL"
- CITY="Chicago"
- ORG="CDIS"
- EMAIL='support\@datacommons.io'
- KEY_EXPIRE=365
+KEY_SIZE=4096
+COUNTRY="US"
+STATE="IL"
+CITY="Chicago"
+ORG="CDIS"
+EMAIL='support\@datacommons.io'
+KEY_EXPIRE=365
#OpenVPN
diff --git a/files/openvpn_management_scripts/reset_totp_token.sh b/files/openvpn_management_scripts/reset_totp_token.sh
index b844af8f2..e937876a2 100755
--- a/files/openvpn_management_scripts/reset_totp_token.sh
+++ b/files/openvpn_management_scripts/reset_totp_token.sh
@@ -40,11 +40,15 @@ update_password_file() {
}
generate_qr_code() {
- uuid=$(uuidgen)
- qrcode_out=/var/www/qrcode/${uuid}.svg
+ mkdir -p /etc/openvpn/pki/qrcodes
+ qrcode_out=/etc/openvpn/pki/qrcodes/${vpn_username}.png
string=$( python -c "import pyotp; print( pyotp.totp.TOTP('$totp_secret').provisioning_uri('$vpn_username', issuer_name='$CLOUD_NAME') )" )
- $( python -c "import pyqrcode; pyqrcode.create('$string').svg('${qrcode_out}', scale=8)" )
- vpn_creds_url="https://${FQDN}/$uuid.svg"
+ $( python -c "import qrcode; qrcode.make('$string').save('${qrcode_out}')" )
+ # vpn_creds_url="https://${FQDN}/$uuid.svg"
+ s3Path="s3://${S3BUCKET}/qrcodes/${vpn_username}.png"
+ aws s3 cp ${qrcode_out} ${s3Path}
+ signedUrl="$(aws s3 presign "$s3Path" --expires-in "$((60*60*48))")"
+ vpn_creds_url=${signedUrl}
}
print_info() {
diff --git a/files/openvpn_management_scripts/revoke_user.sh b/files/openvpn_management_scripts/revoke_user.sh
index 0ffe5c364..89d102f38 100755
--- a/files/openvpn_management_scripts/revoke_user.sh
+++ b/files/openvpn_management_scripts/revoke_user.sh
@@ -25,18 +25,15 @@ set -e
username=${1}
-#Source the settings for EASY RSA
-source $EASYRSA_PATH/vars
#Override exports
export KEY_CN=$username
-set +e
-#revoke-full $username || echo -e "${RED}${BOLD}${BLINK}FAILED TO REVOKE ${username}${CLEAR}"
-revoke-full $username
-#Apparently it doesn't exist like I expected, and says failed even when it succeeded.
-
-set -e
+(
+ cd $EASYRSA_PATH
+ ./easyrsa revoke $username
+ ./easyrsa gen-crl
+)
sed -i "/${username},/d" $USER_PW_FILE || echo -e "${RED}${BOLD}${BLINK}Failed to remove $username from file ${USER_PW_FILE}${CLEAR}"
/etc/openvpn/bin/push_to_s3.sh
diff --git a/files/openvpn_management_scripts/send_email.sh b/files/openvpn_management_scripts/send_email.sh
index 38ec6651a..0686af206 100755
--- a/files/openvpn_management_scripts/send_email.sh
+++ b/files/openvpn_management_scripts/send_email.sh
@@ -14,7 +14,7 @@ RED="\033[31m"
echo -e "Entering ${BOLD}$_${CLEAR}"
-S3BUCKET=WHICHVPN
+export S3BUCKET=WHICHVPN
if [ "${1}" == "" ]
then
diff --git a/files/openvpn_management_scripts/templates/network_tweaks.sh.template b/files/openvpn_management_scripts/templates/network_tweaks.sh.template
index a137a8c6f..1caa8c36a 100644
--- a/files/openvpn_management_scripts/templates/network_tweaks.sh.template
+++ b/files/openvpn_management_scripts/templates/network_tweaks.sh.template
@@ -14,3 +14,5 @@ iptables -I FORWARD -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
# Masquerade
iptables -t nat -A POSTROUTING -s #VPN_SUBNET# -d #VM_SUBNET# -o $vpnserver_int -j MASQUERADE
echo 1 > /proc/sys/net/ipv4/ip_forward
+
+service iptables save
diff --git a/files/openvpn_management_scripts/templates/openvpn.conf.template b/files/openvpn_management_scripts/templates/openvpn.conf.template
index d539015fe..7e692113e 100644
--- a/files/openvpn_management_scripts/templates/openvpn.conf.template
+++ b/files/openvpn_management_scripts/templates/openvpn.conf.template
@@ -10,16 +10,16 @@ persist-key
persist-tun
#certificates
-ca easy-rsa/keys/ca.crt
-cert easy-rsa/keys/#FQDN#.crt
-key easy-rsa/keys/#FQDN#.key # This file should be kept secret
-dh easy-rsa/keys/dh4096.pem
-tls-auth easy-rsa/keys/ta.key 0 # This file is secret
-crl-verify easy-rsa/keys/crl.pem # Revocation files
+ca /etc/openvpn/easy-rsa/pki/ca.crt
+cert /etc/openvpn/easy-rsa/pki/issued/#FQDN#.crt
+key /etc/openvpn/easy-rsa/pki/private/#FQDN#.key # This file should be kept secret
+dh /etc/openvpn/easy-rsa/pki/dh.pem
+tls-auth /etc/openvpn/easy-rsa/pki/ta.key 0 # This file is secret
+crl-verify /etc/openvpn/easy-rsa/pki/crl.pem # Revocation files
#Password script
-auth-user-pass-verify bin/auth-user-pass-verify.sh via-env
-script-security 3 execve
+auth-user-pass-verify /etc/openvpn/bin/auth-user-pass-verify.sh via-env
+script-security 3 # execve
#Cipher suite
cipher AES-256-CBC
diff --git a/files/openvpn_management_scripts/templates/settings.sh.template b/files/openvpn_management_scripts/templates/settings.sh.template
index 2d5f46ef6..c58e8b98c 100644
--- a/files/openvpn_management_scripts/templates/settings.sh.template
+++ b/files/openvpn_management_scripts/templates/settings.sh.template
@@ -1,6 +1,7 @@
export VPN_SETTINGS_LOADED="1"
export CLOUD_NAME='#CLOUD_NAME#'
export FQDN="#FQDN#"
+export EXTHOST='#CLOUD_NAME#.planx-pla.net'
## EXTHOST is set in the easy-rsa/vars env settings. I think these values have to maych so removing from here
#sendemail vars
@@ -28,7 +29,7 @@ export OPENVPN_MY_BIN="/etc/openvpn/bin"
#CDIS OpenVPN scripts contants
export TEMPLATE_DIR="/etc/openvpn/bin/templates"
-export KEY_PATH="/etc/openvpn/easy-rsa/keys/"
+export KEY_PATH="/etc/openvpn/easy-rsa/pki/"
export CA_PATH="$KEY_PATH/ca.crt"
export TA_KEY_PATH="$KEY_PATH/ta.key"
export ARCHIVE_CERT_DIR="$KEY_DIR/user_certs/"
@@ -37,6 +38,6 @@ export USER_PW_FILE="/etc/openvpn/user_passwd.csv"
export VPN_BIN_ROOT="/etc/openvpn/bin"
export VPN_USER_CSV="/etc/openvpn/user_passwd.csv"
export VPN_FILE_ATTACHMENTS="-a$VPN_BIN_ROOT/OpenVPN_for_PLANX_Installation_Guide.pdf"
-
+export KEY_DIR="$EASYRSA_PATH/pki"
export PATH=$PATH:$EASYRSA_PATH:$OPENVPN_MY_BIN
source /etc/openvpn/bin/.venv/bin/activate
diff --git a/files/openvpn_management_scripts/templates/vars.template b/files/openvpn_management_scripts/templates/vars.template
index 0afa0c554..311f05605 100644
--- a/files/openvpn_management_scripts/templates/vars.template
+++ b/files/openvpn_management_scripts/templates/vars.template
@@ -1,81 +1,25 @@
-# easy-rsa parameter settings
-export EXTHOST="#EXTHOST#"
+# EasyRSA 3 vars file
-# NOTE: If you installed from an RPM,
-# don't edit this file in place in
-# /usr/share/openvpn/easy-rsa --
-# instead, you should copy the whole
-# easy-rsa directory to another location
-# (such as /etc/openvpn) so that your
-# edits will not be wiped out by a future
-# OpenVPN package upgrade.
+# This is a user-customized vars file for EasyRSA 3.
+# Adjust these values to suit your needs.
-# This variable should point to
-# the top level of the easy-rsa
-# tree.
-export EASY_RSA="#EASY_RSA_DIR#"
+# Key Size - Increase to 2048 if you are paranoid. This affects performance.
+set_var EASYRSA_KEY_SIZE #KEY_SIZE#
-#
-# This variable should point to
-# the requested executables
-#
-export OPENSSL="openssl"
-export PKCS11TOOL="pkcs11-tool"
-export GREP="grep"
+# CA and Certificate Expiry - Set these to your desired expiry in days
+set_var EASYRSA_CA_EXPIRE 3650
+set_var EASYRSA_CERT_EXPIRE #KEY_EXPIRE#
+# Fields for the request Distinguished Name (DN)
+# Adjust these to match your organization's information
+set_var EASYRSA_REQ_COUNTRY "#COUNTRY#"
+set_var EASYRSA_REQ_PROVINCE "#STATE#"
+set_var EASYRSA_REQ_CITY "#CITY#"
+set_var EASYRSA_REQ_ORG "#ORG#"
+set_var EASYRSA_REQ_EMAIL "#EMAIL#"
+set_var EASYRSA_REQ_OU "#OU#"
-# This variable should point to
-# the openssl.cnf file included
-# with easy-rsa.
-export KEY_CONFIG=`$EASY_RSA/whichopensslcnf $EASY_RSA`
-# Edit this variable to point to
-# your soon-to-be-created key
-# directory.
-#
-# WARNING: clean-all will do
-# a rm -rf on this directory
-# so make sure you define
-# it correctly!
-export KEY_DIR="$EASY_RSA/keys"
+set_var EASYRSA_BATCH "1"
-# Issue rm -rf warning
-echo NOTE: If you run ./clean-all, I will be doing a rm -rf on $KEY_DIR
-
-# PKCS11 fixes
-export PKCS11_MODULE_PATH="dummy"
-export PKCS11_PIN="dummy"
-
-# Increase this to 2048 if you
-# are paranoid. This will slow
-# down TLS negotiation performance
-# as well as the one-time DH parms
-# generation process.
-export KEY_SIZE=#KEY_SIZE#
-
-# In how many days should the root CA key expire?
-export CA_EXPIRE=3650
-
-# In how many days should certificates expire?
-export KEY_EXPIRE=#KEY_EXPIRE#
-
-# These are the default values for fields
-# which will be placed in the certificate.
-# Don't leave any of these fields blank.
-export KEY_COUNTRY="#COUNTRY#"
-export KEY_PROVINCE="#STATE#"
-export KEY_CITY="#CITY#"
-export KEY_ORG="#ORG#"
-export KEY_EMAIL="#EMAIL#"
-export KEY_OU="#OU#"
-
-# X509 Subject Field
-export KEY_NAME="#KEY_NAME#"
-
-# PKCS11 Smart Card
-# export PKCS11_MODULE_PATH="/usr/lib/changeme.so"
-# export PKCS11_PIN=1234
-
-# If you'd like to sign all keys with the same Common Name, uncomment the KEY_CN export below
-# You will also need to make sure your OpenVPN server config has the duplicate-cn option set
-# export KEY_CN="CommonName"
+# Note: Do not leave any of the fields blank as it may cause the script to fail.
diff --git a/files/scripts/ci-env-pool-reset.sh b/files/scripts/ci-env-pool-reset.sh
index 3f1d951d2..362cfbfd5 100644
--- a/files/scripts/ci-env-pool-reset.sh
+++ b/files/scripts/ci-env-pool-reset.sh
@@ -27,21 +27,19 @@ fi
source "${GEN3_HOME}/gen3/gen3setup.sh"
cat - > jenkins-envs-services.txt < jenkins-envs-releases.txt < |
+
+and then allows each AWS account to access the appropriate ECR repositories. The users' ECR repositories are based on their username as stored in the table, with special characters hex-escaped the same way Hatchery escapes them (for example, `@` becomes `-40` and `.` becomes `-2e`), so `user1@username.com`'s ECR repository is assumed to be `nextflow-approved/user1-40username-2ecom`.
+
+### Access needed
+
+- "EcrRepoPolicyUpdateRole" role in the account (Acct1) that contains the ECR repositories:
+
+**Note:** `kube-setup-ecr-access-cronjob.sh` assumes this role already exists.
+
+Permissions:
+```
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Sid": "UpdateEcrRepoPolicy",
+ "Effect": "Allow",
+ "Action": "ecr:SetRepositoryPolicy",
+ "Resource": "arn:aws:ecr:us-east-1::repository/nextflow-approved/*"
+ }
+ ]
+}
+```
+
+Trust policy (allows Acct2):
+```
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Sid": "AllowAssumingRole",
+ "Effect": "Allow",
+ "Principal": {
+ "AWS": "arn:aws:iam:::root"
+ },
+ "Action": "sts:AssumeRole"
+ }
+ ]
+}
+```
+
+- Policy in the account (Acct2) that contains the DynamoDB table (created automatically by `kube-setup-ecr-access-job.sh`):
+```
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Sid": "ReadDynamoDB",
+ "Effect": "Allow",
+ "Action": [
+ "dynamodb:Scan"
+ ],
+ "Resource": "arn:aws:dynamodb:::table/"
+ },
+ {
+ "Sid": "AssumeEcrRole",
+ "Effect": "Allow",
+ "Action": [
+ "sts:AssumeRole"
+ ],
+ "Resource": "arn:aws:iam:::role/"
+ }
+ ]
+}
+```
diff --git a/files/scripts/ecr-access-job.py b/files/scripts/ecr-access-job.py
new file mode 100644
index 000000000..828d94c96
--- /dev/null
+++ b/files/scripts/ecr-access-job.py
@@ -0,0 +1,177 @@
+"""
+See documentation at https://github.com/uc-cdis/cloud-automation/blob/master/files/scripts/ecr-access-job.md
+"""
+
+from decimal import Decimal
+import json
+import os
+from typing import List
+import uuid
+
+import boto3
+from boto3.dynamodb.conditions import Attr
+
+
+REGION = "us-east-1"
+
+# for local testing. in production, use a service account instead of a key.
+MAIN_ACCOUNT_CREDS = {"key_id": os.environ.get("KEY_ID"), "key_secret": os.environ.get("KEY_SECRET")}
+
+
+def escapism(string: str) -> str:
+ """
+ This is a direct translation of Hatchery's `escapism` golang function to python.
+ We need to escape the username in the same way it's escaped by Hatchery's `escapism` function because
+ special chars cannot be used in an ECR repo name, and so that the ECR repo generated here matches the
+ name expected by Hatchery.
+ """
+ safeBytes = "abcdefghijklmnopqrstuvwxyz0123456789"
+ escaped = ""
+ for v in string:
+ if v not in safeBytes:
+ hexCode = "{0:02x}".format(ord(v))
+ escaped += "-" + hexCode
+ else:
+ escaped += v
+ return escaped
+
+
+def get_configs() -> (str, str):
+ table_name = os.environ.get("PAY_MODELS_DYNAMODB_TABLE")
+ if not table_name:
+ raise Exception("Missing 'PAY_MODELS_DYNAMODB_TABLE' environment variable")
+
+ ecr_role_arn = os.environ.get("ECR_ACCESS_JOB_ARN")
+ if not ecr_role_arn:
+ raise Exception("Missing 'ECR_ACCESS_JOB_ARN' environment variable")
+
+ return table_name, ecr_role_arn
+
+
+def query_usernames_and_account_ids(table_name: str) -> List[dict]:
+ """
+ Returns:
+ List[dict]: [ { "user_id": "user1@username.com", "account_id": "123456" } ]
+ """
+ if MAIN_ACCOUNT_CREDS["key_id"]:
+ session = boto3.Session(
+ aws_access_key_id=MAIN_ACCOUNT_CREDS["key_id"],
+ aws_secret_access_key=MAIN_ACCOUNT_CREDS["key_secret"],
+ )
+ else:
+ session = boto3.Session()
+ dynamodb = session.resource("dynamodb", region_name=REGION)
+ table = dynamodb.Table(table_name)
+
+ # get usernames and AWS account IDs from DynamoDB
+ queried_keys = ["user_id", "account_id"]
+ filter_expr = Attr("workspace_type").eq("Direct Pay")
+ proj = ", ".join("#" + key for key in queried_keys)
+ expr = {"#" + key: key for key in queried_keys}
+ response = table.scan(
+ FilterExpression=filter_expr,
+ ProjectionExpression=proj,
+ ExpressionAttributeNames=expr,
+ )
+ assert response.get("ResponseMetadata", {}).get("HTTPStatusCode") == 200, response
+ items = response["Items"]
+ # if the response is paginated, get the rest of the items
+ while response["Count"] > 0:
+ if "LastEvaluatedKey" not in response:
+ break
+ response = table.scan(
+ FilterExpression=filter_expr,
+ ProjectionExpression=proj,
+ ExpressionAttributeNames=expr,
+ ExclusiveStartKey=response["LastEvaluatedKey"],
+ )
+ assert (
+ response.get("ResponseMetadata", {}).get("HTTPStatusCode") == 200
+ ), response
+ items.extend(response["Items"])
+
+ return items
+
+
+def update_access_in_ecr(repo_to_account_ids: List[dict], ecr_role_arn: str) -> None:
+ # get access to ECR in the account that contains the ECR repos
+ if MAIN_ACCOUNT_CREDS["key_id"]:
+ sts = boto3.client(
+ "sts",
+ aws_access_key_id=MAIN_ACCOUNT_CREDS["key_id"],
+ aws_secret_access_key=MAIN_ACCOUNT_CREDS["key_secret"],
+ )
+ else:
+ sts = boto3.client("sts")
+ assumed_role = sts.assume_role(
+ RoleArn=ecr_role_arn,
+ DurationSeconds=900, # minimum time for aws assume role as per boto docs
+ RoleSessionName=f"ecr-access-assume-role-{str(uuid.uuid4())[:8]}",
+ )
+ assert "Credentials" in assumed_role, "Unable to assume role"
+ ecr = boto3.client(
+ "ecr",
+ aws_access_key_id=assumed_role["Credentials"]["AccessKeyId"],
+ aws_secret_access_key=assumed_role["Credentials"]["SecretAccessKey"],
+ aws_session_token=assumed_role["Credentials"]["SessionToken"],
+ )
+
+ # for each ECR repo, whitelist the account IDs so users can access the repo
+ for repo, account_ids in repo_to_account_ids.items():
+ print(f"Allowing AWS accounts {account_ids} to use ECR repository '{repo}'")
+ policy = {
+ "Version": "2008-10-17",
+ "Statement": [
+ {
+ "Sid": "AllowCrossAccountPull",
+ "Effect": "Allow",
+ "Principal": {
+ "AWS": [
+ f"arn:aws:iam::{account_id}:root"
+ for account_id in account_ids
+ ]
+ },
+ "Action": [
+ "ecr:BatchCheckLayerAvailability",
+ "ecr:BatchGetImage",
+ "ecr:GetAuthorizationToken",
+ "ecr:GetDownloadUrlForLayer",
+ ],
+ }
+ ],
+ }
+ # Note that this is overwriting the repo policy, not appending to it. This means we can't have 2 dynamodb
+ # tables pointing at the same set of ECR repos: the repos would only allow the accounts in the table for
+ # which the script was run most recently. eg QA and Staging can't use the same ECR repos.
+ # Appending is not possible since this code will eventually rely on Arborist for authorization information
+ # and we'll need to overwrite in order to remove expired access.
+ try:
+ ecr.set_repository_policy(
+ repositoryName=repo,
+ policyText=json.dumps(policy),
+ )
+ except Exception as e:
+ print(f" Unable to update '{repo}'; skipping it: {e}")
+
+
+def main() -> None:
+ table_name, ecr_role_arn = get_configs()
+ items = query_usernames_and_account_ids(table_name)
+
+ # construct mapping: { ECR repo url: [ AWS account IDs with access ] }
+ ecr_repo_prefix = "nextflow-approved"
+ repo_to_account_ids = {
+ f"{ecr_repo_prefix}/{escapism(e['user_id'])}": [e["account_id"]]
+ for e in items
+ if "account_id" in e
+ }
+ print(
+ "Mapping of ECR repository to allowed AWS accounts:\n",
+ json.dumps(repo_to_account_ids, indent=2),
+ )
+
+ update_access_in_ecr(repo_to_account_ids, ecr_role_arn)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/files/scripts/healdata/heal-cedar-data-ingest.py b/files/scripts/healdata/heal-cedar-data-ingest.py
index fb2c1f2c1..45098400f 100644
--- a/files/scripts/healdata/heal-cedar-data-ingest.py
+++ b/files/scripts/healdata/heal-cedar-data-ingest.py
@@ -1,14 +1,176 @@
import argparse
+import copy
import json
+import sys
import requests
import pydash
-import os
+from uuid import UUID
+
+# Defines how a metadata field is mapped to a filter key
+FILTER_FIELD_MAPPINGS = {
+ "study_metadata.study_type.study_stage": "Study Type",
+ "study_metadata.data.data_type": "Data Type",
+ "study_metadata.study_type.study_subject_type": "Subject Type",
+ "study_metadata.human_subject_applicability.gender_applicability": "Gender",
+ "study_metadata.human_subject_applicability.age_applicability": "Age",
+ "research_program": "Research Program",
+}
+
+# Defines how to handle special cases for values in filters
+SPECIAL_VALUE_MAPPINGS = {
+ "Interview/Focus Group - structured": "Interview/Focus Group",
+ "Interview/Focus Group - semi-structured": "Interview/Focus Group",
+ "Interview/Focus Group - unstructured": "Interview/Focus Group",
+ "Questionnaire/Survey/Assessment - validated instrument": "Questionnaire/Survey/Assessment",
+ "Questionnaire/Survey/Assessment - unvalidated instrument": "Questionnaire/Survey/Assessment",
+ "Cis Male": "Male",
+ "Cis Female": "Female",
+ "Trans Male": "Transgender man/trans man/female-to-male (FTM)",
+ "Female-to-male transsexual": "Transgender man/trans man/female-to-male (FTM)",
+ "Trans Female": "Transgender woman/trans woman/male-to-female (MTF)",
+ "Male-to-female transsexual": "Transgender woman/trans woman/male-to-female (MTF)",
+ "Agender, Non-binary, gender non-conforming": "Genderqueer/gender nonconforming/neither exclusively male nor female",
+ "Gender Queer": "Genderqueer/gender nonconforming/neither exclusively male nor female",
+ "Intersex": "Genderqueer/gender nonconforming/neither exclusively male nor female",
+ "Intersexed": "Genderqueer/gender nonconforming/neither exclusively male nor female",
+ "Buisness Development": "Business Development",
+}
+
+# Defines field values that we don't want to include in the filters
+OMITTED_VALUES_MAPPING = {
+ "study_metadata.human_subject_applicability.gender_applicability": "Not applicable"
+}
+
+# Templates for links to a study's page at each external repository; <STUDY_ID> is replaced with the study's repository_study_ID
+REPOSITORY_STUDY_ID_LINK_TEMPLATE = {
+ "NIDDK Central": "https://repository.niddk.nih.gov/studies/<STUDY_ID>/",
+ "NIDA Data Share": "https://datashare.nida.nih.gov/study/<STUDY_ID>",
+ "NICHD DASH": "https://dash.nichd.nih.gov/study/<STUDY_ID>",
+ "ICPSR": "https://www.icpsr.umich.edu/web/ICPSR/studies/<STUDY_ID>",
+ "BioSystics-AP": "https://biosystics-ap.com/assays/assaystudy/<STUDY_ID>/",
+}
+
+
+def is_valid_uuid(uuid_to_test, version=4):
+ """
+ Check if uuid_to_test is a valid UUID.
+
+ Parameters
+ ----------
+ uuid_to_test : str
+ version : {1, 2, 3, 4}
+
+ Returns
+ -------
+ `True` if uuid_to_test is a valid UUID, otherwise `False`.
+
+ """
+
+ try:
+ uuid_obj = UUID(uuid_to_test, version=version)
+ except ValueError:
+ return False
+ return str(uuid_obj) == uuid_to_test
+
+
+def update_filter_metadata(metadata_to_update):
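+ # Rebuild advSearchFilters and tags for a study from its metadata, keeping a few existing entries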
+ # Retain these from existing filters
+ save_filters = ["Common Data Elements"]
+ filter_metadata = [filter for filter in metadata_to_update["advSearchFilters"] if filter["key"] in save_filters]
+ for metadata_field_key, filter_field_key in FILTER_FIELD_MAPPINGS.items():
+ filter_field_values = pydash.get(metadata_to_update, metadata_field_key)
+ if filter_field_values:
+ if isinstance(filter_field_values, str):
+ filter_field_values = [filter_field_values]
+ if not isinstance(filter_field_values, list):
+ print(filter_field_values)
+ raise TypeError("Neither a string nor a list")
+ for filter_field_value in filter_field_values:
+ if (
+ metadata_field_key,
+ filter_field_value,
+ ) in OMITTED_VALUES_MAPPING.items():
+ continue
+ if filter_field_value in SPECIAL_VALUE_MAPPINGS:
+ filter_field_value = SPECIAL_VALUE_MAPPINGS[filter_field_value]
+ filter_metadata.append(
+ {"key": filter_field_key, "value": filter_field_value}
+ )
+ filter_metadata = pydash.uniq(filter_metadata)
+ metadata_to_update["advSearchFilters"] = filter_metadata
+ # Retain these from existing tags
+ save_tags = ["Data Repository", "Common Data Elements"]
+ tags = [tag for tag in metadata_to_update["tags"] if tag["category"] in save_tags]
+ # Add any new tags from advSearchFilters
+ for f in metadata_to_update["advSearchFilters"]:
+ if f["key"] == "Gender":
+ continue
+ tag = {"name": f["value"], "category": f["key"]}
+ if tag not in tags:
+ tags.append(tag)
+ metadata_to_update["tags"] = tags
+ return metadata_to_update
+
+
+def get_client_token(client_id: str, client_secret: str):
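+ # Exchange the CEDAR client id/secret for an access token via the OAuth2 client_credentials flow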
+ try:
+ token_url = "http://revproxy-service/user/oauth2/token"
+ headers = {"Content-Type": "application/x-www-form-urlencoded"}
+ params = {"grant_type": "client_credentials"}
+ data = "scope=openid user data"
+
+ token_result = requests.post(
+ token_url,
+ params=params,
+ headers=headers,
+ data=data,
+ auth=(client_id, client_secret),
+ )
+ token = token_result.json()["access_token"]
+ except Exception as e:
+ raise Exception(f"Could not get token: {e}")
+ return token
+
+
+def get_related_studies(serial_num, guid, hostname):
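+ # Return title/link entries for other discovery studies in MDS that share this NIH serial number, excluding the study itself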
+ related_study_result = []
+
+ if serial_num:
+ mds = requests.get(
+ f"http://revproxy-service/mds/metadata?nih_reporter.project_num_split.serial_num={serial_num}&data=true&limit=2000"
+ )
+ if mds.status_code == 200:
+ related_study_metadata = mds.json()
+
+ for (
+ related_study_metadata_key,
+ related_study_metadata_value,
+ ) in related_study_metadata.items():
+ if related_study_metadata_key == guid or (
+ related_study_metadata_value["_guid_type"] != "discovery_metadata"
+ and related_study_metadata_value["_guid_type"]
+ != "unregistered_discovery_metadata"
+ ):
+ # do nothing for self, or for archived studies
+ continue
+ title = (
+ related_study_metadata_value.get("gen3_discovery", {})
+ .get("study_metadata", {})
+ .get("minimal_info", {})
+ .get("study_name", "")
+ )
+ link = (
+ f"https://{hostname}/portal/discovery/{related_study_metadata_key}/"
+ )
+ related_study_result.append({"title": title, "link": link})
+ return related_study_result
parser = argparse.ArgumentParser()
parser.add_argument("--directory", help="CEDAR Directory ID for registering ")
-parser.add_argument("--access_token", help="User access token")
+parser.add_argument("--cedar_client_id", help="The CEDAR client id")
+parser.add_argument("--cedar_client_secret", help="The CEDAR client secret")
parser.add_argument("--hostname", help="Hostname")
@@ -16,65 +178,224 @@
if not args.directory:
print("Directory ID is required!")
- exit(1)
-if not args.access_token:
- print("User access token is required!")
- exit(1)
+ sys.exit(1)
+if not args.cedar_client_id:
+ print("CEDAR client id is required!")
+ sys.exit(1)
+if not args.cedar_client_secret:
+ print("CEDAR client secret is required!")
+ sys.exit(1)
if not args.hostname:
print("Hostname is required!")
- exit(1)
+ sys.exit(1)
dir_id = args.directory
-access_token = args.access_token
+client_id = args.cedar_client_id
+client_secret = args.cedar_client_secret
hostname = args.hostname
-token_header = {"Authorization": 'bearer ' + access_token}
+print("Getting CEDAR client access token")
+access_token = get_client_token(client_id, client_secret)
+token_header = {"Authorization": "bearer " + access_token}
-# Get the metadata from cedar to register
-print("Querying CEDAR...")
-cedar = requests.get(f"https://{hostname}/cedar/get-instance-by-directory/{dir_id}", headers=token_header)
+limit = 10
+offset = 0
-# If we get metadata back now register with MDS
-if cedar.status_code == 200:
- metadata_return = cedar.json()
- if "metadata" not in metadata_return:
- print("Got 200 from CEDAR wrapper but no metadata in body, something is not right!")
- exit(1)
+# initialize this to be bigger than our initial limit so we enter the while loop at least once
+total = 100
- print(f"Successfully got {len(metadata_return['metadata'])} record(s) from CEDAR directory")
- for cedar_record in metadata_return["metadata"]:
- if "appl_id" not in cedar_record:
- print("This record doesn't have appl_id, skipping...")
- continue
- cedar_record_id = str(cedar_record["appl_id"])
+if not is_valid_uuid(dir_id):
+ print("Directory ID is not in UUID format!")
+ sys.exit(1)
- # Get the metadata record for the nih_application_id
- mds = requests.get(f"https://{hostname}/mds/metadata/{cedar_record_id}",
- headers=token_header
- )
- if mds.status_code == 200:
- mds_res = mds.json()
- mds_cedar_register_data_body = {}
- mds_discovery_data_body = {}
- if mds_res["_guid_type"] == "discovery_metadata":
- print("Metadata is already registered. Updating MDS record")
- elif mds_res["_guid_type"] == "unregistered_discovery_metadata":
- print("Metadata is has not been registered. Registering it in MDS record")
- pydash.merge(mds_discovery_data_body, mds_res["gen3_discovery"], cedar_record)
- mds_cedar_register_data_body["gen3_discovery"] = mds_discovery_data_body
- mds_cedar_register_data_body["_guid_type"] = "discovery_metadata"
-
- print("Metadata is now being registered.")
- mds_put = requests.put(f"https://{hostname}/mds/metadata/{cedar_record_id}",
- headers=token_header,
- json = mds_cedar_register_data_body
+while limit + offset <= total:
+ # Get the metadata from cedar to register
+ print("Querying CEDAR...")
+ cedar = requests.get(
+ f"http://revproxy-service/cedar/get-instance-by-directory/{dir_id}?limit={limit}&offset={offset}",
+ headers=token_header,
+ )
+
+ # If we get metadata back now register with MDS
+ if cedar.status_code == 200:
+ metadata_return = cedar.json()
+ if "metadata" not in metadata_return:
+ print(
+ "Got 200 from CEDAR wrapper but no metadata in body, something is not right!"
)
- if mds_put.status_code == 200:
- print(f"Successfully registered: {cedar_record_id}")
+ sys.exit(1)
+
+ total = metadata_return["metadata"]["totalCount"]
+ returned_records = len(metadata_return["metadata"]["records"])
+ print(f"Successfully got {returned_records} record(s) from CEDAR directory")
+ for cedar_record in metadata_return["metadata"]["records"]:
+ # get the CEDAR instance id from cedar for querying in our MDS
+ cedar_instance_id = pydash.get(
+ cedar_record, "metadata_location.cedar_study_level_metadata_template_instance_ID"
+ )
+ if cedar_instance_id is None:
+ print("This record doesn't have CEDAR instance id, skipping...")
+ continue
+
+ # Get the metadata record for the CEDAR instance id
+ mds = requests.get(
+ f"http://revproxy-service/mds/metadata?gen3_discovery.study_metadata.metadata_location.cedar_study_level_metadata_template_instance_ID={cedar_instance_id}&data=true"
+ )
+ if mds.status_code == 200:
+ mds_res = mds.json()
+
+ # the query should return exactly one record, keyed by the MDS GUID; anything else means the lookup failed
+ if len(mds_res) != 1:
+ print(f"Query did not return exactly one record for template_instance_ID={cedar_instance_id}")
+ continue
+
+ # get the key for our mds record
+ mds_record_guid = list(mds_res.keys())[0]
+
+ mds_res = mds_res[mds_record_guid]
+ mds_cedar_register_data_body = {**mds_res}
+ mds_discovery_data_body = {}
+ mds_clinical_trials = {}
+ if mds_res["_guid_type"] == "discovery_metadata":
+ print("Metadata is already registered. Updating MDS record")
+ elif mds_res["_guid_type"] == "unregistered_discovery_metadata":
+ print(
+ "Metadata has not been registered. Registering it in MDS record"
+ )
+ else:
+ print(
+ f"This metadata data record has a special GUID type \"{mds_res['_guid_type']}\" and will be skipped"
+ )
+ continue
+
+ if "clinicaltrials_gov" in cedar_record:
+ mds_clinical_trials = cedar_record["clinicaltrials_gov"]
+ del cedar_record["clinicaltrials_gov"]
+
+ # special handling for this field: its parent is deleted before we merge the CEDAR and MDS SLMD, to avoid duplicate values
+ cedar_record_other_study_websites = cedar_record.get(
+ "metadata_location", {}
+ ).get("other_study_websites", [])
+ del cedar_record["metadata_location"]
+
+ mds_res["gen3_discovery"]["study_metadata"].update(cedar_record)
+ mds_res["gen3_discovery"]["study_metadata"]["metadata_location"][
+ "other_study_websites"
+ ] = cedar_record_other_study_websites
+
+ # setup citations
+ doi_citation = mds_res["gen3_discovery"]["study_metadata"].get(
+ "doi_citation", ""
+ )
+ mds_res["gen3_discovery"]["study_metadata"]["citation"][
+ "heal_platform_citation"
+ ] = doi_citation
+
+ # setup repository_study_link
+ data_repositories = (
+ mds_res.get("gen3_discovery", {})
+ .get("study_metadata", {})
+ .get("metadata_location", {})
+ .get("data_repositories", [])
+ )
+ repository_citation = "Users must also include a citation to the data as specified by the local repository."
+ repository_citation_additional_text = ' The link to the study page at the local repository can be found in the "Data" tab.'
+ for repository in data_repositories:
+ if (
+ repository["repository_name"]
+ and repository["repository_name"]
+ in REPOSITORY_STUDY_ID_LINK_TEMPLATE
+ and repository["repository_study_ID"]
+ ):
+ repository_study_link = REPOSITORY_STUDY_ID_LINK_TEMPLATE[
+ repository["repository_name"]
+ ].replace("<STUDY_ID>", repository["repository_study_ID"])
+ repository.update(
+ {"repository_study_link": repository_study_link}
+ )
+ if (
+ repository_citation_additional_text
+ not in repository_citation
+ ):
+ repository_citation += repository_citation_additional_text
+ if len(data_repositories):
+ data_repositories[0] = {
+ **data_repositories[0],
+ "repository_citation": repository_citation,
+ }
+
+ mds_res["gen3_discovery"]["study_metadata"]["metadata_location"][
+ "data_repositories"
+ ] = copy.deepcopy(data_repositories)
+
+ # set up related studies
+ serial_num = None
+ try:
+ serial_num = (
+ mds_res.get("nih_reporter", {})
+ .get("project_num_split", {})
+ .get("serial_num", None)
+ )
+ except Exception:
+ print("Unable to get serial number for study")
+
+ if serial_num is None:
+ print("Unable to get serial number for study")
+
+ related_study_result = get_related_studies(
+ serial_num, mds_record_guid, hostname
+ )
+ mds_res["gen3_discovery"]["related_studies"] = copy.deepcopy(related_study_result)
+
+ # merge data from cedar that is not study level metadata into a level higher
+ deleted_keys = []
+ for key, value in mds_res["gen3_discovery"]["study_metadata"].items():
+ if not isinstance(value, dict):
+ mds_res["gen3_discovery"][key] = value
+ deleted_keys.append(key)
+ for key in deleted_keys:
+ del mds_res["gen3_discovery"]["study_metadata"][key]
+
+ mds_discovery_data_body = update_filter_metadata(
+ mds_res["gen3_discovery"]
+ )
+
+ mds_cedar_register_data_body["gen3_discovery"] = mds_discovery_data_body
+ if mds_clinical_trials:
+ mds_cedar_register_data_body["clinicaltrials_gov"] = {
+ **mds_cedar_register_data_body.get("clinicaltrials_gov", {}),
+ **mds_clinical_trials,
+ }
+
+ mds_cedar_register_data_body["_guid_type"] = "discovery_metadata"
+
+ print(f"Metadata {mds_record_guid} is now being registered.")
+ mds_put = requests.put(
+ f"http://revproxy-service/mds/metadata/{mds_record_guid}",
+ headers=token_header,
+ json=mds_cedar_register_data_body,
+ )
+ if mds_put.status_code == 200:
+ print(f"Successfully registered: {mds_record_guid}")
+ else:
+ print(
+ f"Failed to register: {mds_record_guid}. Might not be MDS admin"
+ )
+ print(f"Status from MDS: {mds_put.status_code}")
else:
- print(f"Failed to register: {cedar_record_id}. Might not be MDS admin")
- print(f"Status from MDS: {mds_put.status_code}")
- else:
- print(f"Failed to get information from MDS: {mds.status_code}")
-else:
- print(f"Failed to get information from CEDAR wrapper service: {cedar.status_code}")
+ print(f"Failed to get information from MDS: {mds.status_code}")
+
+ else:
+ print(
+ f"Failed to get information from CEDAR wrapper service: {cedar.status_code}"
+ )
+
+ if offset + limit == total:
+ break
+
+ offset = offset + limit
+ if (offset + limit) > total:
+ limit = total - offset
+
+ if limit < 0:
+ break
diff --git a/files/squid_whitelist/web_whitelist b/files/squid_whitelist/web_whitelist
index 9955eff9c..4d4c8f393 100644
--- a/files/squid_whitelist/web_whitelist
+++ b/files/squid_whitelist/web_whitelist
@@ -7,20 +7,22 @@ achecker.ca
apache.github.io
api.epigraphdb.org
api.monqcle.com
+awslabs.github.io
biodata-integration-tests.net
-biorender.com
+marketing.biorender.com
clinicaltrials.gov
+charts.bitnami.com
ctds-planx.atlassian.net
data.cityofchicago.org
dataguids.org
api.login.yahoo.com
-api.snapcraft.io
apt.kubernetes.io
argoproj.github.io
archive.cloudera.com
archive.linux.duke.edu
aws.github.io
bay.uchicago.edu
+bioconductor.org
bionimbus.tabix.oicrsofteng.org
bits.netbeans.org
centos.chicago.waneq.com
@@ -33,6 +35,7 @@ cernvm.cern.ch
charts.bitnami.com
charts.helm.sh
cloud.r-project.org
+coredns.github.io
coreos.com
covidstoplight.org
cpan.mirrors.tds.net
@@ -68,20 +71,24 @@ ftp.usf.edu
ftp.ussg.iu.edu
fmwww.bc.edu
gcr.io
-gen3.org
get.helm.sh
+ghcr.io
git.io
go.googlesource.com
golang.org
gopkg.in
grafana.com
+grafana.github.io
+helm.elastic.co
http.us.debian.org
ifconfig.io
+ingress.coralogix.us
internet2.edu
k8s.gcr.io
ks.osdc.io
kubecost.github.io
kubernetes.github.io
+kubernetes-sigs.github.io
lib.stat.cmu.edu
login.mathworks.com
login.microsoftonline.com
@@ -111,14 +118,19 @@ mirrors.gigenet.com
mirrors.lga7.us.voxel.net
mirrors.nics.utk.edu
mirrors.syringanetworks.net
+mps.csb.pitt.edu
mran.microsoft.com
neuro.debian.net
neurodeb.pirsquared.org
nginx.org
+nvidia.github.io
opportunityinsights.org
orcid.org
pgp.mit.edu
ppa.launchpad.net
+prometheus-community.github.io
+proxy.golang.org
+public.ecr.aws
pubmirrors.dal.corespace.com
reflector.westga.edu
registry.npmjs.org
@@ -133,8 +145,10 @@ repo.dimenoc.com
repos.mia.quadranet.com
repos.redrockhost.com
repos.sensuapp.org
+repo.vmware.com
repository.cloudera.com
resource.metadatacenter.org
+rmq.n3c.ncats.io
rules.emergingthreats.net
rweb.quant.ku.edu
sa-update.dnswl.org
@@ -143,6 +157,7 @@ sa-update.space-pro.be
security.debian.org
services.mathworks.com
streaming.stat.iastate.edu
+uc-cdis.github.io
us-east4-docker.pkg.dev
us-central1-docker.pkg.dev
www.google.com
@@ -153,3 +168,5 @@ www.rabbitmq.com
www.uniprot.org
vpodc.org
yahoo.com
+idp.stage.qdr.org
+stage.qdr.org
\ No newline at end of file
diff --git a/files/squid_whitelist/web_wildcard_whitelist b/files/squid_whitelist/web_wildcard_whitelist
index 1421f6d5d..1717b4443 100644
--- a/files/squid_whitelist/web_wildcard_whitelist
+++ b/files/squid_whitelist/web_wildcard_whitelist
@@ -11,6 +11,7 @@
.bioconductor.org
.bionimbus.org
.bitbucket.org
+.blob.core.windows.net
.bloodpac.org
.braincommons.org
.bsc.es
@@ -21,6 +22,7 @@
.centos.org
.ceph.com
.chef.io
+.chordshealth.org
.clamav.net
.cloud.google.com
.cloudfront.net
@@ -31,15 +33,19 @@
.data-commons.org
.datadoghq.com
.datastage.io
+.ddog-gov.com
.diseasedatahub.org
.docker.com
.docker.io
.dockerproject.org
.dph.illinois.gov
.elasticsearch.org
+.eramba.org
.erlang-solutions.com
+.external-secrets.io
.extjs.com
.fedoraproject.org
+.gen3.org
.genome.jp
.github.com
.githubusercontent.com
@@ -93,9 +99,12 @@
.sks-keyservers.net
.slack.com
.slack-msgs.com
+.snapcraft.io
+.snapcraftcontent.com
.sourceforge.net
.southsideweekly.com
.theanvil.io
+.tigera.io
.twistlock.com
.ubuntu.com
.ucsc.edu
diff --git a/flavors/eks/bootstrap-explicit-proxy-docker.sh b/flavors/eks/bootstrap-explicit-proxy-docker.sh
index 13d181d03..091be1b18 100644
--- a/flavors/eks/bootstrap-explicit-proxy-docker.sh
+++ b/flavors/eks/bootstrap-explicit-proxy-docker.sh
@@ -1,3 +1,9 @@
+MIME-Version: 1.0
+Content-Type: multipart/mixed; boundary="BOUNDARY"
+
+--BOUNDARY
+Content-Type: text/x-shellscript; charset="us-ascii"
+
#!/bin/bash -xe
# User data for our EKS worker nodes basic arguments to call the bootstrap script for EKS images
@@ -52,3 +58,21 @@ if [[ ! -z "${activation_id}" ]] || [[ ! -z "${customer_id}" ]]; then
rm qualys-cloud-agent.x86_64.rpm
sudo /usr/local/qualys/cloud-agent/bin/qualys-cloud-agent.sh ActivationId=${activation_id} CustomerId=${customer_id}
fi
+
+sudo yum update -y
+sudo yum install -y dracut-fips openssl >> /opt/fips-install.log
+sudo dracut -f
+# configure grub
+sudo /sbin/grubby --update-kernel=ALL --args="fips=1"
+
+--BOUNDARY
+Content-Type: text/cloud-config; charset="us-ascii"
+
+power_state:
+ delay: now
+ mode: reboot
+ message: Powering off
+ timeout: 2
+ condition: true
+
+--BOUNDARY--
\ No newline at end of file
diff --git a/flavors/eks/bootstrap-with-security-updates.sh b/flavors/eks/bootstrap-with-security-updates.sh
index 1e6a0b7eb..06d962f55 100644
--- a/flavors/eks/bootstrap-with-security-updates.sh
+++ b/flavors/eks/bootstrap-with-security-updates.sh
@@ -1,3 +1,9 @@
+MIME-Version: 1.0
+Content-Type: multipart/mixed; boundary="BOUNDARY"
+
+--BOUNDARY
+Content-Type: text/x-shellscript; charset="us-ascii"
+
#!/bin/bash -xe
# User data for our EKS worker nodes basic arguments to call the bootstrap script for EKS images
@@ -76,3 +82,21 @@ if [[ ! -z "${activation_id}" ]] || [[ ! -z "${customer_id}" ]]; then
rm qualys-cloud-agent.x86_64.rpm
sudo /usr/local/qualys/cloud-agent/bin/qualys-cloud-agent.sh ActivationId=${activation_id} CustomerId=${customer_id}
fi
+
+sudo yum update -y
+sudo yum install -y dracut-fips openssl >> /opt/fips-install.log
+sudo dracut -f
+# configure grub
+sudo /sbin/grubby --update-kernel=ALL --args="fips=1"
+
+--BOUNDARY
+Content-Type: text/cloud-config; charset="us-ascii"
+
+power_state:
+ delay: now
+ mode: reboot
+ message: Powering off
+ timeout: 2
+ condition: true
+
+--BOUNDARY--
\ No newline at end of file
diff --git a/flavors/eks/bootstrap.sh b/flavors/eks/bootstrap.sh
index f5dbcf55e..7dda384d7 100644
--- a/flavors/eks/bootstrap.sh
+++ b/flavors/eks/bootstrap.sh
@@ -1,3 +1,9 @@
+MIME-Version: 1.0
+Content-Type: multipart/mixed; boundary="BOUNDARY"
+
+--BOUNDARY
+Content-Type: text/x-shellscript; charset="us-ascii"
+
#!/bin/bash -xe
# User data for our EKS worker nodes basic arguments to call the bootstrap script for EKS images
@@ -25,3 +31,21 @@ if [[ ! -z "${activation_id}" ]] || [[ ! -z "${customer_id}" ]]; then
rm qualys-cloud-agent.x86_64.rpm
sudo /usr/local/qualys/cloud-agent/bin/qualys-cloud-agent.sh ActivationId=${activation_id} CustomerId=${customer_id}
fi
+
+sudo yum update -y
+sudo yum install -y dracut-fips openssl >> /opt/fips-install.log
+sudo dracut -f
+# configure grub
+sudo /sbin/grubby --update-kernel=ALL --args="fips=1"
+
+--BOUNDARY
+Content-Type: text/cloud-config; charset="us-ascii"
+
+power_state:
+ delay: now
+ mode: reboot
+ message: Powering off
+ timeout: 2
+ condition: true
+
+--BOUNDARY--
\ No newline at end of file
diff --git a/flavors/squid_auto/squid_running_on_docker.sh b/flavors/squid_auto/squid_running_on_docker.sh
index 05607f304..812a9f738 100644
--- a/flavors/squid_auto/squid_running_on_docker.sh
+++ b/flavors/squid_auto/squid_running_on_docker.sh
@@ -8,6 +8,9 @@ DISTRO=$(awk -F '[="]*' '/^NAME/ { print $2 }' < /etc/os-release)
WORK_USER="ubuntu"
if [[ $DISTRO == "Amazon Linux" ]]; then
WORK_USER="ec2-user"
+ if [[ $(awk -F '[="]*' '/^VERSION_ID/ { print $2 }' < /etc/os-release) == "2023" ]]; then
+ DISTRO="al2023"
+ fi
fi
HOME_FOLDER="/home/${WORK_USER}"
SUB_FOLDER="${HOME_FOLDER}/cloud-automation"
@@ -60,6 +63,8 @@ fi
function install_basics(){
if [[ $DISTRO == "Ubuntu" ]]; then
apt -y install atop
+ elif [[ $DISTRO == "al2023" ]]; then
+ sudo dnf install cronie nc -y
fi
}
@@ -69,10 +74,18 @@ function install_docker(){
# Docker
###############################################################
# Install docker from sources
- curl -fsSL ${DOCKER_DOWNLOAD_URL}/gpg | sudo apt-key add -
- add-apt-repository "deb [arch=amd64] ${DOCKER_DOWNLOAD_URL} $(lsb_release -cs) stable"
- apt update
- apt install -y docker-ce
+ if [[ $DISTRO == "Ubuntu" ]]; then
+ curl -fsSL ${DOCKER_DOWNLOAD_URL}/gpg | sudo apt-key add -
+ add-apt-repository "deb [arch=amd64] ${DOCKER_DOWNLOAD_URL} $(lsb_release -cs) stable"
+ apt update
+ apt install -y docker-ce
+ else
+ sudo yum update -y
+ sudo yum install -y docker
+ # Start and enable Docker service
+ sudo systemctl start docker
+ sudo systemctl enable docker
+ fi
mkdir -p /etc/docker
cp ${SUB_FOLDER}/flavors/squid_auto/startup_configs/docker-daemon.json /etc/docker/daemon.json
chmod -R 0644 /etc/docker
@@ -201,8 +214,10 @@ function install_awslogs {
if [[ $DISTRO == "Ubuntu" ]]; then
wget ${AWSLOGS_DOWNLOAD_URL} -O amazon-cloudwatch-agent.deb
dpkg -i -E ./amazon-cloudwatch-agent.deb
- else
+ elif [[ $DISTRO == "Amazon Linux" ]]; then
sudo yum install amazon-cloudwatch-agent nc -y
+ elif [[ $DISTRO == "al2023" ]]; then
+ sudo dnf install amazon-cloudwatch-agent -y
fi
# Configure the AWS logs
@@ -292,6 +307,19 @@ function main(){
--volume ${SQUID_CACHE_DIR}:${SQUID_CACHE_DIR} \
--volume ${SQUID_CONFIG_DIR}:${SQUID_CONFIG_DIR}:ro \
quay.io/cdis/squid:${SQUID_IMAGE_TAG}
+
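+ # basic health check: restart the squid container whenever nothing is listening on port 3128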
+ max_attempts=10
+ attempt_counter=0
+ while [ $attempt_counter -lt $max_attempts ]; do
+ #((attempt_counter++))
+ sleep 10
+ if [[ -z "$(sudo lsof -i:3128)" ]]; then
+ echo "Squid not healthy, restarting."
+ docker restart squid
+ else
+ echo "Squid healthy"
+ fi
+ done
}
main
diff --git a/flavors/squid_auto/startup_configs/squid.conf b/flavors/squid_auto/startup_configs/squid.conf
index 653026200..b1e44810a 100644
--- a/flavors/squid_auto/startup_configs/squid.conf
+++ b/flavors/squid_auto/startup_configs/squid.conf
@@ -56,7 +56,6 @@ http_access deny all
persistent_request_timeout 5 seconds
-cache_dir ufs /var/cache/squid 100 16 256
pid_filename /var/run/squid/squid.pid
# vi:syntax=squid.conf
diff --git a/flavors/vpn_nlb_central/vpnvm_new.sh b/flavors/vpn_nlb_central/vpnvm_new.sh
new file mode 100644
index 000000000..00f8306fc
--- /dev/null
+++ b/flavors/vpn_nlb_central/vpnvm_new.sh
@@ -0,0 +1,533 @@
+#!/bin/bash
+
+###############################################################
+# variables
+###############################################################
+
+MAGIC_URL="http://169.254.169.254/latest/meta-data/"
+AVAILABILITY_ZONE=$(curl -s ${MAGIC_URL}placement/availability-zone)
+PRIVATE_IPV4=$(curl -s ${MAGIC_URL}local-ipv4)
+PUBLIC_IPV4=$(curl -s ${MAGIC_URL}public-ipv4)
+REGION=$(echo ${AVAILABILITY_ZONE::-1})
+#DOCKER_DOWNLOAD_URL="https://download.docker.com/linux/ubuntu"
+AWSLOGS_DOWNLOAD_URL="https://s3.amazonaws.com/amazoncloudwatch-agent/ubuntu/amd64/latest/amazon-cloudwatch-agent.deb"
+#TERRAFORM_DOWNLOAD_URL="https://releases.hashicorp.com/terraform/0.11.15/terraform_0.11.15_linux_amd64.zip"
+DISTRO=$(awk -F '[="]*' '/^NAME/ { print $2 }' < /etc/os-release)
+if [[ $DISTRO == "Ubuntu" ]]; then
+ WORK_USER="ubuntu"
+else
+ WORK_USER="ec2-user"
+fi
+HOME_FOLDER="/home/${WORK_USER}"
+SUB_FOLDER="${HOME_FOLDER}/cloud-automation"
+
+OPENVPN_PATH='/etc/openvpn'
+BIN_PATH="${OPENVPN_PATH}/bin"
+EASYRSA_PATH="${OPENVPN_PATH}/easy-rsa"
+VARS_PATH="${EASYRSA_PATH}/vars"
+
+#EASY-RSA Vars
+KEY_SIZE=4096
+COUNTRY="US"
+STATE="IL"
+CITY="Chicago"
+ORG="CTDS"
+EMAIL='support\@datacommons.io'
+KEY_EXPIRE=365
+
+#OpenVPN
+PROTO=tcp
+
+
+###############################################################
+# get any variables we want coming from terraform variables
+###############################################################
+if [ $# -eq 0 ];
+then
+ echo "No arguments supplied, something is wrong"
+ exit 1
+else
+ #OIFS=$IFS
+ echo $1
+ IFS=';' read -ra ADDR <<< "$1"
+ echo ${ADDR[@]}
+ for i in "${ADDR[@]}"; do
+ echo $i
+ if [[ $i = *"cwl_group"* ]];
+ then
+ CWL_GROUP="${CWL_GROUP:-$(echo ${i} | cut -d= -f2)}"
+ elif [[ ${i} = *"vpn_nlb_name"* ]];
+ then
+ VPN_NLB_NAME="$(echo ${i} | cut -d= -f2)"
+ elif [[ ${i} = *"cloud_name"* ]];
+ then
+ CLOUD_NAME="$(echo ${i} | cut -d= -f2)"
+ elif [[ ${i} = *"csoc_vpn_subnet"* ]];
+ then
+ CSOC_VPN_SUBNET="$(echo ${i} | cut -d= -f2)"
+ elif [[ ${i} = *"csoc_vm_subnet"* ]];
+ then
+ CSOC_VM_SUBNET="$(echo ${i} | cut -d= -f2)"
+ elif [[ $i = *"account_id"* ]];
+ then
+ ACCOUNT_ID="$(echo ${i} | cut -d= -f2)"
+ elif [[ $i = *"alternate_cwlg"* ]];
+ then
+ CWL_GROUP="$(echo ${i} | cut -d= -f2)"
+ fi
+ done
+ echo $1
+fi
+
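+# certs and config for this VPN are pushed to / recovered from this bucket (see push_to_s3.sh / recover_from_s3.sh)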
+S3_BUCKET="vpn-certs-and-files-${VPN_NLB_NAME}"
+
+function logs_helper(){
+ echo -e "****************** ${1} ******************"
+}
+
+function install_basics() {
+
+ logs_helper "Installing Basics"
+ if [[ $DISTRO == "Ubuntu" ]]; then
+ apt -y install python3-pip build-essential sipcalc wget curl jq apt-transport-https ca-certificates software-properties-common fail2ban libyaml-dev
+ apt -y install postfix mailutils python-virtualenv uuid-runtime lighttpd net-tools
+ apt -y install openvpn bridge-utils libssl-dev openssl zlib1g-dev easy-rsa haveged zip mutt sipcalc python-dev python3-venv
+ # For openVPN
+ debconf-set-selections <<< "postfix postfix/mailname string planx-pla.net"
+ debconf-set-selections <<< "postfix postfix/main_mailer_type string 'Internet Site'"
+ else
+ amazon-linux-extras install epel
+ yum -y -q install epel-release iptables-services
+ yum -y -q install python3-pip python3-devel gcc sipcalc wget curl jq ca-certificates software-properties-common fail2ban libyaml-dev
+ yum -y -q install postfix mailutils python-virtualenv uuid-runtime lighttpd net-tools
+ yum -y -q install openvpn bridge-utils openssl zlib1g-dev easy-rsa haveged zip mutt sipcalc python-dev python3-venv
+ fi
+ pip3 install awscli
+ useradd --shell /bin/nologin --system openvpn
+
+ logs_helper "Basics installed"
+}
+
+
+function configure_basics() {
+
+ logs_helper "Configuring Basics"
+
+ local dest_path="/root/openvpn_management_scripts"
+ local src_path="${SUB_FOLDER}/files/openvpn_management_scripts"
+ cp -r ${src_path} /root
+
+ # Different buckets for different CSOC vpn environments
+ sed -i "s/WHICHVPN/${S3_BUCKET}\/${VPN_NLB_NAME}/" ${dest_path}/push_to_s3.sh
+ sed -i "s/WHICHVPN/${S3_BUCKET}\/${VPN_NLB_NAME}/" ${dest_path}/recover_from_s3.sh
+ sed -i "s/WHICHVPN/${S3_BUCKET}\/${VPN_NLB_NAME}/" ${dest_path}/send_email.sh
+
+ # Replace the User variable for hostname, VPN subnet and VM subnet
+ #sed -i "s/SERVERNAME/${VPN_NLB_NAME}/" ${dest_path}/csoc_vpn_user_variable
+ #sed -i "s/CLOUDNAME/${CLOUD_NAME}/" ${dest_path}/csoc_vpn_user_variable
+
+ #VPN_SUBNET=${CSOC_VPN_SUBNET}
+ #VPN_SUBNET_BASE=$( sipcalc $VPN_SUBNET | perl -ne 'm|Host address\s+-\s+(\S+)| && print "$1"')
+ #VPN_SUBNET_MASK_BITS=$( sipcalc $VPN_SUBNET | perl -ne 'm|Network mask \(bits\)\s+-\s+(\S+)| && print "$1"' )
+ #sed -i "s/VPN_SUBNET/$VPN_SUBNET_BASE\/$VPN_SUBNET_MASK_BITS/" ${dest_path}/csoc_vpn_user_variable
+
+ #VM_SUBNET=${CSOC_VM_SUBNET}
+ #VM_SUBNET_BASE=$( sipcalc $VM_SUBNET | perl -ne 'm|Host address\s+-\s+(\S+)| && print "$1"')
+ #VM_SUBNET_MASK_BITS=$( sipcalc $VM_SUBNET | perl -ne 'm|Network mask \(bits\)\s+-\s+(\S+)| && print "$1"' )
+ #sed -i "s/VM_SUBNET/$VM_SUBNET_BASE\/$VM_SUBNET_MASK_BITS/" ${dest_path}/csoc_vpn_user_variable
+
+ echo "aws s3 ls s3://${S3_BUCKET}/${VPN_NLB_NAME}/ && ${dest_path}/recover_from_s3.sh"
+ aws s3 ls s3://${S3_BUCKET}/${VPN_NLB_NAME}/ && ${dest_path}/recover_from_s3.sh
+
+ logs_helper "Copying modified scripts to /etc/openvpn"
+ cp -vr /root/openvpn_management_scripts /etc/openvpn/
+
+ logs_helper "Basics configured"
+
+}
+
+
+function configure_awscli() {
+
+ logs_helper "Configuring AWS"
+ mkdir -p ${HOME_FOLDER}/.aws
+ cat > ${HOME_FOLDER}/.aws/config <<EOT
+[default]
+output = json
+region = us-east-1
+
+[profile csoc]
+output = json
+region = us-east-1
+EOT
+
+ mkdir -p /root/.aws
+ cat > /root/.aws/config <> ${config_json} < /root/server.pem
+ fi
+
+ export FQDN=${CLOUD_NAME}
+ export cloud=${VPN_NLB_NAME}
+ export SERVER_PEM="/root/server.pem"
+ export VM_SUBNET=${CSOC_VM_SUBNET}
+ export VM_SUBNET_BASE=$( sipcalc $VM_SUBNET | perl -ne 'm|Host address\s+-\s+(\S+)| && print "$1"')
+ export VM_SUBNET_MASK=$( sipcalc $VM_SUBNET | perl -ne 'm|Network mask\s+-\s+(\S+)| && print "$1"' )
+ export VM_SUBNET_MASK_BITS=$( sipcalc $VM_SUBNET | perl -ne 'm|Network mask \(bits\)\s+-\s+(\S+)| && print "$1"' )
+ export VPN_SUBNET=${CSOC_VPN_SUBNET}
+ export VPN_SUBNET_BASE=$( sipcalc $VPN_SUBNET | perl -ne 'm|Host address\s+-\s+(\S+)| && print "$1"')
+ export VPN_SUBNET_MASK=$( sipcalc $VPN_SUBNET | perl -ne 'm|Network mask\s+-\s+(\S+)| && print "$1"' )
+ export VPN_SUBNET_MASK_BITS=$( sipcalc $VPN_SUBNET | perl -ne 'm|Network mask \(bits\)\s+-\s+(\S+)| && print "$1"' )
+ export server_pem="/root/server.pem"
+ echo "*******"
+ echo "${FQDN} -- ${cloud} -- ${SERVER_PEM} -- ${VPN_SUBNET} -- ${VPN_SUBNET_BASE} -- ${VPN_SUBNET_MASK_BITS} --/ ${VM_SUBNET} -- ${VM_SUBNET_BASE} -- ${VM_SUBNET_MASK_BITS}"
+ echo "*******"
+ #export FQDN="$SERVERNAME.planx-pla.net"; export cloud="$CLOUDNAME"; export SERVER_PEM="/root/server.pem";
+
+ #cp /etc/openvpn/bin/templates/lighttpd.conf.template /etc/lighttpd/lighttpd.conf
+ #mkdir -p --mode=750 /var/www/qrcode
+ #chown openvpn:www-data /var/www/qrcode
+ #mkdir -p /etc/lighttpd/certs
+ #cp /root/server.pem /etc/lighttpd/certs/server.pem
+ #service lighttpd restart
+
+ #systemctl restart openvpn
+
+ logs_helper "openVPN init complete"
+
+}
+
+function install_easyrsa() {
+
+ logs_helper "Installing easyRSA"
+ if [[ -f $EASYRSA_PATH/easyrsa ]];
+ then
+ logs_helper "easyRSA already installed"
+ return
+ fi
+ easyRsaVer="3.1.7"
+ wget https://github.com/OpenVPN/easy-rsa/releases/download/v${easyRsaVer}/EasyRSA-${easyRsaVer}.tgz
+ # extract the release and move it into place at $EASYRSA_PATH
+ tar xvf EasyRSA-${easyRsaVer}.tgz
+ mv EasyRSA-${easyRsaVer}/ $EASYRSA_PATH
+ rm EasyRSA-${easyRsaVer}.tgz
+ cp "$OPENVPN_PATH/bin/templates/vars.template" $VARS_PATH
+
+# local easy_rsa_dir="$EASYRSA_PATH"
+# local exthost="$FQDN"
+# local ou="$cloud"
+# local key_name="$ou-OpenVPN"
+
+ perl -p -i -e "s|#EASY_RSA_DIR#|${EASYRSA_PATH}|" $VARS_PATH
+ perl -p -i -e "s|#EXTHOST#|${FQDN}|" $VARS_PATH
+ perl -p -i -e "s|#KEY_SIZE#|${KEY_SIZE}|" $VARS_PATH
+ perl -p -i -e "s|#COUNTRY#|${COUNTRY}|" $VARS_PATH
+ perl -p -i -e "s|#STATE#|${STATE}|" $VARS_PATH
+ perl -p -i -e "s|#CITY#|${CITY}|" $VARS_PATH
+ perl -p -i -e "s|#ORG#|${ORG}|" $VARS_PATH
+ perl -p -i -e "s|#EMAIL#|${EMAIL}|" $VARS_PATH
+ perl -p -i -e "s|#OU#|${cloud}|" $VARS_PATH
+ perl -p -i -e "s|#KEY_NAME#|${cloud}-OpenVPN|" $VARS_PATH
+ perl -p -i -e "s|#KEY_EXPIRE#|${KEY_EXPIRE}|" $VARS_PATH
+
+ sed -i 's/^subjectAltName/#subjectAltName/' $EASYRSA_PATH/openssl-*.cnf
+ logs_helper "easyRSA complete"
+}
+
+function install_custom_scripts() {
+
+ logs_helper "installing custom scripts"
+ cd $OPENVPN_PATH
+
+ #pull our openvpn scripts
+ #cp -r /root/openvpn_management_scripts /etc/openvpn/
+ ln -sfn openvpn_management_scripts bin
+ cd $BIN_PATH
+ python3 -m venv .venv
+ #virtualenv .venv
+ #This is needed or else you get : .venv/bin/activate: line 57: PS1: unbound variable
+ set +u
+ # ( source .venv/bin/activate; pip install pyotp pyqrcode bcrypt )
+ ( source .venv/bin/activate; pip3 install pyotp qrcode bcrypt )
+ set -u
+
+ logs_helper "custom scripts done"
+}
+
+install_settings() {
+
+ logs_helper "installing settings"
+ SETTINGS_PATH="$BIN_PATH/settings.sh"
+ cp "$OPENVPN_PATH/bin/templates/settings.sh.template" "$SETTINGS_PATH"
+ perl -p -i -e "s|#FQDN#|$FQDN|" $SETTINGS_PATH
+ perl -p -i -e "s|#EMAIL#|$EMAIL|" $SETTINGS_PATH
+ perl -p -i -e "s|#CLOUD_NAME#|${cloud}|" $SETTINGS_PATH
+
+ logs_helper "settings installed"
+}
+
+build_PKI() {
+
+ logs_helper "building pki"
+ cd $EASYRSA_PATH
+ # ln -s openssl-1.0.0.cnf openssl.cnf
+ echo "This is long"
+ # ./easyrsa clean-all nopass
+ ./easyrsa init-pki
+ ./easyrsa build-ca nopass
+ ./easyrsa gen-dh
+ ./easyrsa gen-crl
+ ./easyrsa build-server-full $CLOUD_NAME nopass
+ # ./easyrsa gen-req $VPN_NLB_NAME.planx-pla.net nopass
+ openvpn --genkey --secret ta.key
+ mv ta.key $EASYRSA_PATH/pki/ta.key
+
+ # This will error, but that's fine; the crl.pem was created (without it the openvpn server crashes)
+ set +e
+ ./revoke-full client &>/dev/null || true
+ set -e
+ logs_helper "pki done"
+
+}
+
+configure_ovpn() {
+
+ logs_helper "configuring openvpn"
+ if [[ $DISTRO == "Ubuntu" ]]; then
+ OVPNCONF_PATH="/etc/openvpn/openvpn.conf"
+ else
+ OVPNCONF_PATH="/etc/openvpn/server/server.conf"
+ fi
+ cp "$OPENVPN_PATH/bin/templates/openvpn.conf.template" "$OVPNCONF_PATH"
+
+ perl -p -i -e "s|#FQDN#|$FQDN|" $OVPNCONF_PATH
+
+ perl -p -i -e "s|#VPN_SUBNET_BASE#|$VPN_SUBNET_BASE|" $OVPNCONF_PATH
+ perl -p -i -e "s|#VPN_SUBNET_MASK#|$VPN_SUBNET_MASK|" $OVPNCONF_PATH
+
+ perl -p -i -e "s|#VM_SUBNET_BASE#|$VM_SUBNET_BASE|" $OVPNCONF_PATH
+ perl -p -i -e "s|#VM_SUBNET_MASK#|$VM_SUBNET_MASK|" $OVPNCONF_PATH
+
+ perl -p -i -e "s|#PROTO#|$PROTO|" $OVPNCONF_PATH
+
+ if [[ $DISTRO == "Ubuntu" ]]; then
+ systemctl restart openvpn
+ else
+ systemctl enable openvpn-server@server
+ systemctl start openvpn-server@server
+ fi
+
+ logs_helper "openvpn configured"
+}
+
+tweak_network() {
+
+ logs_helper "tweaking network"
+ local nettweaks_path="$OPENVPN_PATH/bin/network_tweaks.sh"
+ cp "$OPENVPN_PATH/bin/templates/network_tweaks.sh.template" "${nettweaks_path}"
+ perl -p -i -e "s|#VPN_SUBNET#|$VPN_SUBNET|" ${nettweaks_path}
+ perl -p -i -e "s|#VM_SUBNET#|$VM_SUBNET|" ${nettweaks_path}
+ perl -p -i -e "s|#PROTO#|$PROTO|" ${nettweaks_path}
+
+ chmod +x ${nettweaks_path}
+ ${nettweaks_path}
+
+ # Disable firewall in amazonlinux
+ systemctl stop firewalld
+ systemctl disable firewalld
+
+ #cp /etc/rc.local /etc/rc.local.bak
+ #sed -i 's/^exit/#exit/' /etc/rc.local
+ #echo /etc/openvpn/bin/network_tweaks.sh >> /etc/rc.local
+ #echo exit 0 >> /etc/rc.local
+
+
+ logs_helper "network tweaked"
+
+}
+
+install_webserver() {
+
+
+ logs_helper "installing webserver"
+ #Webserver used for QRCodes
+ if [[ $DISTRO == "Ubuntu" ]]; then
+ apt -y install lighttpd
+ else
+ yum -y install lighttpd
+ fi
+ cp "$OPENVPN_PATH/bin/templates/lighttpd.conf.template" /etc/lighttpd/lighttpd.conf
+
+ mkdir -p --mode=750 /var/www/qrcode
+ chown openvpn:www-data /var/www/qrcode
+
+ if [ -f $SERVER_PEM ]
+ then
+ mkdir --mode=700 /etc/lighttpd/certs
+ cp $SERVER_PEM /etc/lighttpd/certs/server.pem
+ service lighttpd restart
+ fi
+
+ logs_helper "webserver installed"
+}
+
+
+install_cron() {
+ cp "$OPENVPN_PATH/bin/templates/cron.template" /etc/cron.d/openvpn
+}
+
+misc() {
+
+ logs_helper "installing misc"
+ cd $OPENVPN_PATH
+ mkdir -p easy-rsa/pki/ovpn_files
+ ln -sfn easy-rsa/pki/ovpn_files
+
+ # If openvpn fails to start, it's because of permissions: init needs root rw to start, but the service needs openvpn rw to work
+ mkdir --mode 775 -p clients.d/
+ mkdir --mode 775 -p clients.d/tmp/
+ chown root:openvpn clients.d/tmp/
+
+ mkdir -p easy-rsa/pki/ovpn_files_seperated/
+ mkdir -p easy-rsa/pki/ovpn_files_systemd/
+ mkdir -p easy-rsa/pki/ovpn_files_resolvconf/
+
+ touch user_passwd.csv
+
+ mkdir -p environments
+ mkdir -p client-restrictions
+
+ chown -R openvpn:openvpn easy-rsa/ user_passwd.csv clients.d/tmp/
+ #ahhem.
+ chown :root /etc/openvpn/clients.d/tmp
+ chmod g+rwx /etc/openvpn/clients.d/tmp
+ # systemctl restart openvpn
+
+ logs_helper "misc done"
+}
+
+function main() {
+ install_basics
+ configure_awscli
+ configure_basics
+
+ if [[ $DISTRO == "Ubuntu" ]]; then
+ install_awslogs
+ fi
+ install_openvpn
+
+ set -e
+ set -u
+ install_custom_scripts
+ # if [! -d "/etc/openvpn/easy-rsa"]; then
+ aws s3 ls s3://${S3_BUCKET}/${VPN_NLB_NAME}/ || install_easyrsa
+
+ install_settings
+
+ # if [! -d "/etc/openvpn/easy-rsa"]; then
+ aws s3 ls s3://${S3_BUCKET}/${VPN_NLB_NAME}/ || build_PKI
+ #fi
+ misc
+ configure_ovpn
+ tweak_network
+
+ install_cron
+
+
+ mkdir -p --mode=750 /var/www/qrcode
+
+ logs_helper "openvpn setup complete"
+
+}
+
+main
diff --git a/gen3/bin/awsrole.sh b/gen3/bin/awsrole.sh
index 476e7d003..dd19ea7a4 100644
--- a/gen3/bin/awsrole.sh
+++ b/gen3/bin/awsrole.sh
@@ -20,18 +20,22 @@ gen3_awsrole_help() {
# NOTE: service-account to role is 1 to 1
#
# @param serviceAccount to link to the role
+# @param flag (optional) - specify a flag to use a different trust policy
#
function gen3_awsrole_ar_policy() {
local serviceAccount="$1"
shift || return 1
- if [[ ! -z $1 ]]; then
- local namespace=$1
+ if [[ -z $1 ]] || [[ $1 == -* ]]; then
+ namespace=$(gen3 db namespace)
else
- local namespace=$(gen3 db namespace)
+ namespace=$1
+ shift
fi
local issuer_url
local account_id
local vpc_name
+ local flag=$flag
+
vpc_name="$(gen3 api environment)" || return 1
issuer_url="$(aws eks describe-cluster \
--name ${vpc_name} \
@@ -42,7 +46,42 @@ function gen3_awsrole_ar_policy() {
local provider_arn="arn:aws:iam::${account_id}:oidc-provider/${issuer_url}"
- cat - < config.tfvars
@@ -182,10 +226,14 @@ gen3_awsrole_create() {
gen3_log_err "use: gen3 awsrole create roleName saName"
return 1
fi
- if [[ ! -z $1 ]]; then
- local namespace=$1
+ if [[ -z $1 ]] || [[ $1 == -* ]]; then
+ namespace=$(gen3 db namespace)
else
- local namespace=$(gen3 db namespace)
+ namespace=$1
+ shift
+ fi
+ if [[ ! -z $1 ]]; then
+ flag=$1
fi
# do simple validation of name
local regexp="^[a-z][a-z0-9\-]*$"
@@ -200,6 +248,7 @@ EOF
return 1
fi
+
# check if the name is already used by another entity
local entity_type
entity_type=$(_get_entity_type $rolename)
@@ -216,9 +265,11 @@ EOF
fi
TF_IN_AUTOMATION="true"
- if ! _tfplan_role $rolename $saName $namespace; then
+
+ if ! _tfplan_role $rolename $saName $namespace $flag; then
return 1
fi
+
if ! _tfapply_role $rolename; then
return 1
fi
@@ -367,4 +418,4 @@ gen3_awsrole() {
# Let testsuite source file
if [[ -z "$GEN3_SOURCE_ONLY" ]]; then
gen3_awsrole "$@"
-fi
+fi
\ No newline at end of file
diff --git a/gen3/bin/create-es7-cluster.sh b/gen3/bin/create-es7-cluster.sh
new file mode 100644
index 000000000..553dc2652
--- /dev/null
+++ b/gen3/bin/create-es7-cluster.sh
@@ -0,0 +1,64 @@
+#!/bin/bash
+
+source "${GEN3_HOME}/gen3/lib/utils.sh"
+gen3_load "gen3/gen3setup"
+
+# Save the new and old cluster names to vars
+environment=`gen3 api environment`
+existing_cluster_name="$environment-gen3-metadata"
+new_cluster_name="$environment-gen3-metadata-2"
+
+# Gather existing cluster information
+cluster_info=$(aws es describe-elasticsearch-domain --domain-name "$existing_cluster_name")
+
+# Extract relevant information from the existing cluster
+instance_type=`echo "$cluster_info" | jq -r '.DomainStatus.ElasticsearchClusterConfig.InstanceType'`
+instance_count=`echo "$cluster_info" | jq -r '.DomainStatus.ElasticsearchClusterConfig.InstanceCount'`
+volume_type=`echo "$cluster_info" | jq -r '.DomainStatus.EBSOptions.VolumeType'`
+volume_size=`echo "$cluster_info" | jq -r '.DomainStatus.EBSOptions.VolumeSize'`
+vpc_name=`echo "$cluster_info" | jq -r '.DomainStatus.VPCOptions.VPCId'`
+subnet_ids=`echo "$cluster_info" | jq -r '.DomainStatus.VPCOptions.SubnetIds[]'`
+security_groups=`echo "$cluster_info" | jq -r '.DomainStatus.VPCOptions.SecurityGroupIds[]'`
+access_policies=`echo "$cluster_info" | jq -r '.DomainStatus.AccessPolicies'`
+kms_key_id=`echo "$cluster_info" | jq -r '.DomainStatus.EncryptionAtRestOptions.KmsKeyId'`
+
+# Check if the new Elasticsearch cluster name already exists
+new_cluster=`aws es describe-elasticsearch-domain --domain-name "$new_cluster_name"`
+
+if [ -n "$new_cluster" ]; then
+ echo "Cluster $new_cluster_name already exists"
+else
+ echo "Cluster does not exist - creating..."
+ # Create the new Elasticsearch cluster
+ aws es create-elasticsearch-domain \
+ --domain-name "$new_cluster_name" \
+ --elasticsearch-version "7.10" \
+ --elasticsearch-cluster-config \
+ "InstanceType=$instance_type,InstanceCount=$instance_count" \
+ --ebs-options \
+ "EBSEnabled=true,VolumeType=$volume_type,VolumeSize=$volume_size" \
+ --vpc-options "SubnetIds=${subnet_ids[*]},SecurityGroupIds=${security_groups[*]}" \
+ --access-policies "$access_policies" \
+ --encryption-at-rest-options "Enabled=true,KmsKeyId=$kms_key_id" \
+ --node-to-node-encryption-options "Enabled=true" \
+ > /dev/null 2>&1
+
+ # Wait for the new cluster to be available
+ sleep_duration=60
+ max_retries=10
+ retry_count=0
+
+ while [ $retry_count -lt $max_retries ]; do
+ cluster_status=$(aws es describe-elasticsearch-domain --domain-name "$new_cluster_name" | jq -r '.DomainStatus.Processing')
+ if [ "$cluster_status" != "true" ]; then
+ echo "New cluster is available."
+ break
+ fi
+ sleep $sleep_duration
+ ((retry_count++))
+ done
+
+ if [ $retry_count -eq $max_retries ]; then
+ echo "New cluster creation may still be in progress. Please check the AWS Management Console for the status."
+ fi
+fi
diff --git a/gen3/bin/dbbackup.sh b/gen3/bin/dbbackup.sh
new file mode 100644
index 000000000..eb9611a90
--- /dev/null
+++ b/gen3/bin/dbbackup.sh
@@ -0,0 +1,212 @@
+#!/bin/bash
+
+####################################################################################################
+# Script: dbbackup.sh
+#
+# Description:
+# This script facilitates the management of database backups within the gen3 environment. It is
+# equipped to establish policies, service accounts, roles, and S3 buckets. Depending on the
+# command provided, it will either initiate a database dump or perform a restore.
+#
+# Usage:
+# gen3 dbbackup [dump|restore|va-dump]
+#
+# dump - Initiates a database dump, creating the essential AWS resources if they are absent.
+# The dump operation is intended to be executed from the namespace/commons that requires
+# the backup.
+# restore - Initiates a database restore, creating the essential AWS resources if they are absent.
+# The restore operation is meant to be executed in the target namespace, where the backup
+# needs to be restored.
+# va-dump - Initiates the va-testing database dump job, creating the essential AWS resources
+# if they are absent.
+#
+# Notes:
+# This script extensively utilizes the AWS CLI and the gen3 CLI. Proper functioning demands a
+# configured gen3 environment and the availability of the necessary CLI tools.
+#
+####################################################################################################
+
+# Exit on error
+#set -e
+
+# Print commands before executing
+#set -x
+
+#trap 'echo "Error at Line $LINENO"' ERR
+
+source "${GEN3_HOME}/gen3/lib/utils.sh"
+gen3_load "gen3/lib/kube-setup-init"
+
+policy_name="bucket_reader_writer_gen3_db_backup"
+account_id=$(aws sts get-caller-identity --query "Account" --output text)
+vpc_name="$(gen3 api environment)"
+namespace="$(gen3 db namespace)"
+sa_name="dbbackup-sa"
+bucket_name="gen3-db-backups-${account_id}"
+
+gen3_log_info "policy_name: $policy_name"
+gen3_log_info "account_id: $account_id"
+gen3_log_info "vpc_name: $vpc_name"
+gen3_log_info "namespace: $namespace"
+gen3_log_info "sa_name: $sa_name"
+gen3_log_info "bucket_name: $bucket_name"
+
+
+# Create an S3 access policy if it doesn't exist
+create_policy() {
+ # Check if policy exists
+ if ! aws iam list-policies --query "Policies[?PolicyName == '$policy_name'] | [0].Arn" --output text | grep -q "arn:aws:iam"; then
+ # Create the S3 access policy - policy document
+ access_policy=$(cat <<-EOM
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Action": [
+ "s3:CreateBucket",
+ "s3:PutObject",
+ "s3:GetObject",
+ "s3:ListBucket",
+ "s3:DeleteObject"
+ ],
+ "Resource": [
+ "arn:aws:s3:::gen3-db-backups-*"
+ ]
+ }
+ ]
+}
+EOM
+ )
+
+ # Create the S3 access policy from the policy document
+ policy_arn=$(aws iam create-policy --policy-name "$policy_name" --policy-document "$access_policy" --query "Policy.Arn" --output text)
+ gen3_log_info "policy_arn: $policy_arn"
+ else
+ gen3_log_info "Policy $policy_name already exists, skipping policy creation."
+ policy_arn=$(aws iam list-policies --query "Policies[?PolicyName == '$policy_name'] | [0].Arn" --output text | grep "arn:aws:iam" | head -n 1)
+ gen3_log_info "policy_arn: $policy_arn"
+ fi
+}
+
+
+# Create or update the Service Account and its corresponding IAM Role
+create_service_account_and_role() {
+ cluster_arn=$(kubectl config current-context)
+ eks_cluster=$(echo "$cluster_arn" | awk -F'/' '{print $2}')
+ oidc_url=$(aws eks describe-cluster --name $eks_cluster --query 'cluster.identity.oidc.issuer' --output text | sed -e 's/^https:\/\///')
+ role_name="${vpc_name}-${namespace}-${sa_name}-role"
+ role_arn="arn:aws:iam::${account_id}:role/${role_name}"
+ local trust_policy=$(mktemp -p "$XDG_RUNTIME_DIR" "tmp_policy.XXXXXX")
+ gen3_log_info "trust_policy: $trust_policy"
+ gen3_log_info "eks_cluster: $eks_cluster"
+ gen3_log_info "oidc_url: $oidc_url"
+ gen3_log_info "role_name: $role_name"
+
+
+ cat > ${trust_policy} <<EOF
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Principal": {
+ "Federated": "arn:aws:iam::${account_id}:oidc-provider/${oidc_url}"
+ },
+ "Action": "sts:AssumeRoleWithWebIdentity",
+ "Condition": {
+ "StringEquals": {
+ "${oidc_url}:aud": "sts.amazonaws.com",
+ "${oidc_url}:sub": "system:serviceaccount:${namespace}:${sa_name}"
+ }
+ }
+ }
+ ]
+}
+EOF
+
+ # Create the IAM role if it doesn't already exist; otherwise update its trust policy
+ if aws iam get-role --role-name $role_name 2>&1; then
+ gen3_log_info "Updating existing role: $role_name"
+ aws iam update-assume-role-policy --role-name $role_name --policy-document "file://$trust_policy"
+ else
+ gen3_log_info "Creating new role: $role_name"
+ aws iam create-role --role-name $role_name --assume-role-policy-document "file://$trust_policy"
+ fi
+
+ # Attach the policy to the IAM role
+ aws iam attach-role-policy --role-name $role_name --policy-arn $policy_arn
+
+ # Create the Kubernetes service account if it doesn't exist
+ if ! kubectl get serviceaccount -n $namespace $sa_name 2>&1; then
+ kubectl create serviceaccount -n $namespace $sa_name
+ fi
+ # Annotate the KSA with the IAM role ARN
+ gen3_log_info "Annotating Service Account with IAM role ARN"
+ kubectl annotate serviceaccount -n ${namespace} ${sa_name} eks.amazonaws.com/role-arn=${role_arn} --overwrite
+
+}
+
+# Create an S3 bucket if it doesn't exist
+create_s3_bucket() {
+ # Check if bucket already exists
+ if aws s3 ls "s3://$bucket_name" 2>&1 | grep -q 'NoSuchBucket'; then
+ gen3_log_info "Bucket does not exist, creating..."
+ aws s3 mb "s3://$bucket_name"
+ else
+ gen3_log_info "Bucket $bucket_name already exists, skipping bucket creation."
+ fi
+}
+
+
+# Function to trigger the database backup job
+db_dump() {
+ gen3 job run psql-db-prep-dump
+}
+
+
+# Function to trigger the database backup restore job
+db_restore() {
+ gen3 job run psql-db-prep-restore
+}
+
+va_testing_db_dump() {
+ gen3 job run psql-db-dump-va-testing
+}
+
+
+# main function to determine whether to run dump, restore, or va-dump
+main() {
+ case "$1" in
+ dump)
+ gen3_log_info "Triggering database dump..."
+ create_policy
+ create_service_account_and_role
+ create_s3_bucket
+ db_dump
+ ;;
+ restore)
+ gen3_log_info "Triggering database restore..."
+ create_policy
+ create_service_account_and_role
+ create_s3_bucket
+ db_restore
+ ;;
+ va-dump)
+ gen3_log_info "Running a va-testing DB dump..."
+ create_policy
+ create_service_account_and_role
+ create_s3_bucket
+ va_testing_db_dump
+ ;;
+ *)
+ echo "Invalid command. Usage: gen3 dbbackup [dump|restore|va-dump]"
+ return 1
+ ;;
+ esac
+}
+
+main "$1"
diff --git a/gen3/bin/ecr.sh b/gen3/bin/ecr.sh
index 23254c5de..36af791ef 100644
--- a/gen3/bin/ecr.sh
+++ b/gen3/bin/ecr.sh
@@ -32,6 +32,8 @@ accountList=(
205252583234
885078588865
922467707295
+533267425233
+048463324059
)
principalStr=""
@@ -71,18 +73,34 @@ ecrReg="707767160287.dkr.ecr.us-east-1.amazonaws.com"
# lib -------------------------------
gen3_ecr_login() {
- if gen3_time_since ecr-login is 36000; then
+ if [[ -S /var/run/docker.sock ]]; then
+ if gen3_time_since ecr-login is 36000; then
# re-authenticate every 10 hours
- aws ecr get-login-password --region us-east-1 | docker login --username AWS --password-stdin "707767160287.dkr.ecr.us-east-1.amazonaws.com" 1>&2 || exit 1
+ aws ecr get-login-password --region us-east-1 | docker login --username AWS --password-stdin "707767160287.dkr.ecr.us-east-1.amazonaws.com" 1>&2 || exit 1
+ fi
+ elif [[ -S /var/run/containerd/containerd.sock ]]; then
+ gen3_log_info "Containerd found, logging in during each ctr command"
+ loginCommand="-u AWS:$(aws ecr get-login-password --region us-east-1)"
+ else
+ gen3_log_err "No container runtime found. Exiting"
+ exit 1
fi
}
gen3_quay_login() {
if [[ -f ~/Gen3Secrets/quay/login ]]; then
- if gen3_time_since quay-login is 36000; then
- cat ~/Gen3Secrets/quay/login | docker login --username cdis+gen3 --password-stdin quay.io
+ if [[ -S /var/run/docker.sock ]]; then
+ if gen3_time_since quay-login is 36000; then
+ cat ~/Gen3Secrets/quay/login | docker login --username cdis+gen3 --password-stdin quay.io
+ fi
+ elif [[ -S /var/run/containerd/containerd.sock ]]; then
+ gen3_log_info "Containerd found, logging in during each ctr command"
+ loginCommand="-u \"cdis+gen3\":\"$(cat ~/Gen3Secrets/quay/login)\""
+ else
+ gen3_log_err "No container runtime found. Exiting"
+ exit 1
fi
- else
+ else
gen3_log_err "Place credentials for the quay robot account (cdis+gen3) in this file ~/Gen3Secrets/quay/login"
exit 1
fi
@@ -97,7 +115,8 @@ gen3_quay_login() {
gen3_ecr_copy_image() {
local srcTag="$1"
local destTag="$2"
- if [[ "$destTag" == *"quay.io"* ]]; then
+ loginCommand=""
+ if [[ "$destTag" == *"quay.io"* ]]; then
gen3_quay_login || return 1
else
gen3_ecr_login || return 1
@@ -108,12 +127,23 @@ gen3_ecr_copy_image() {
fi
shift
shift
- (docker pull "$srcTag" && \
- docker tag "$srcTag" "$destTag" && \
- docker push "$destTag"
- ) || return 1
+ if [[ -S /var/run/docker.sock ]]; then
+ (docker pull "$srcTag" && \
+ docker tag "$srcTag" "$destTag" && \
+ docker push "$destTag"
+ ) || return 1
+ docker image rm "$srcTag" "$destTag"
+ elif [[ -S /var/run/containerd/containerd.sock ]]; then
+ (ctr image pull "$srcTag" --all-platforms $loginCommand && \
+ ctr image tag "$srcTag" "$destTag" && \
+ ctr image push "$destTag" $loginCommand
+ ) || return 1
+ ctr image rm "$srcTag" "$destTag"
+ else
+ gen3_log_err "No container runtime found. Exiting"
+ exit 1
+ fi
# save disk space
- docker image rm "$srcTag" "$destTag"
return 0
}
@@ -178,7 +208,7 @@ gen3_ecr_update_all() {
echo $repoList
for repo in $repoList; do
gen3_ecr_update_policy $repo
- done
+ done
}
# Check if the Quay image exists in ECR repository
@@ -203,7 +233,7 @@ gen3_ecr_describe_image() {
# @param repoName
gen3_ecr_create_repo() {
local repoName="gen3/$1"
- aws ecr create-repository --repository-name ${repoName} --image-scanning-configuration scanOnPush=true
+ aws ecr create-repository --repository-name ${repoName} --image-scanning-configuration scanOnPush=true
}
diff --git a/gen3/bin/gitops.sh b/gen3/bin/gitops.sh
index 48ba6512c..bc0358499 100644
--- a/gen3/bin/gitops.sh
+++ b/gen3/bin/gitops.sh
@@ -291,9 +291,15 @@ gen3_gitops_sync() {
if g3kubectl get configmap manifest-versions; then
oldJson=$(g3kubectl get configmap manifest-versions -o=json | jq ".data")
fi
- newJson=$(g3k_config_lookup ".versions")
echo "old JSON is: $oldJson"
- echo "new JSON is: $newJson"
+ newJson=$(g3k_config_lookup ".versions")
+ # Make sure the script exits if newJSON contains invalid JSON
+ if [ $? -ne 0 ]; then
+ echo "Error: g3k_config_lookup command failed - invalid JSON"
+ exit 1
+ else
+ echo "new JSON is: $newJson"
+ fi
if [[ -z $newJson ]]; then
echo "Manifest does not have versions section. Unable to get new versions, skipping version update."
elif [[ -z $oldJson ]]; then
@@ -439,8 +445,13 @@ gen3_gitops_sync() {
echo "DRYRUN flag detected, not rolling"
gen3_log_info "dict_roll: $dict_roll; versions_roll: $versions_roll; portal_roll: $portal_roll; etl_roll: $etl_roll; fence_roll: $fence_roll"
else
- if [[ ( "$dict_roll" = true ) || ( "$versions_roll" = true ) || ( "$portal_roll" = true )|| ( "$etl_roll" = true ) || ( "$covid_cronjob_roll" = true ) || ("fence_roll" = true) ]]; then
+ if [[ ( "$dict_roll" = true ) || ( "$versions_roll" = true ) || ( "$portal_roll" = true )|| ( "$etl_roll" = true ) || ( "$covid_cronjob_roll" = true ) || ("$fence_roll" = true) ]]; then
echo "changes detected, rolling"
+ tmpHostname=$(gen3 api hostname)
+ if [[ $slack = true ]]; then
+ curl -X POST --data-urlencode "payload={\"text\": \"Gitops-sync Cron: Changes detected on ${tmpHostname} - rolling...\"}" "${slackWebHook}"
+ fi
+
# run etl job before roll all so guppy can pick up changes
if [[ "$etl_roll" = true ]]; then
gen3 update_config etl-mapping "$(gen3 gitops folder)/etlMapping.yaml"
@@ -466,7 +477,6 @@ gen3_gitops_sync() {
rollRes=$?
# send result to slack
if [[ $slack = true ]]; then
- tmpHostname=$(gen3 api hostname)
resStr="SUCCESS"
color="#1FFF00"
if [[ $rollRes != 0 ]]; then
diff --git a/gen3/bin/healthcheck.sh b/gen3/bin/healthcheck.sh
index b2973aa04..b658ff033 100644
--- a/gen3/bin/healthcheck.sh
+++ b/gen3/bin/healthcheck.sh
@@ -47,7 +47,7 @@ gen3_healthcheck() {
# refer to k8s api docs for pod status info
# https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.13/#podstatus-v1-core
gen3_log_info "Getting all pods..."
-
+
local allPods=$(g3kubectl get pods --all-namespaces -o json | \
jq -r '[
.items[] | {
@@ -117,7 +117,7 @@ gen3_healthcheck() {
if [[ "$statusCode" -lt 200 || "$statusCode" -ge 400 ]]; then
internetAccess=false
fi
-
+
# check internet access with explicit proxy
gen3_log_info "Checking explicit proxy internet access..."
local http_proxy="http://cloud-proxy.internal.io:3128"
@@ -137,6 +137,10 @@ gen3_healthcheck() {
internetAccessExplicitProxy=false
fi
+ gen3_log_info "Clearing Evicted pods"
+ sleep 5
+ clear_evicted_pods
+
local healthJson=$(cat - < /dev/null; then
gen3_log_err "failed to assemble valid json data: $healthJson"
return 1
@@ -205,4 +209,8 @@ EOM
fi
}
+clear_evicted_pods() {
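+ # delete any Evicted pods across all namespaces; failures are ignored so the healthcheck itself never fails here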
+ g3kubectl get pods -A -o json | jq '.items[] | select(.status.reason!=null) | select(.status.reason | contains("Evicted")) | "kubectl delete pods \(.metadata.name) -n \(.metadata.namespace)"' | xargs -n 1 bash -c 2> /dev/null || true
+}
+
gen3_healthcheck "$@"
diff --git a/gen3/bin/iam-serviceaccount.sh b/gen3/bin/iam-serviceaccount.sh
index 0c5a8bba3..1ea055f66 100644
--- a/gen3/bin/iam-serviceaccount.sh
+++ b/gen3/bin/iam-serviceaccount.sh
@@ -115,7 +115,7 @@ EOF
# @return the resulting json from awscli
##
function create_role(){
- local role_name="${vpc_name}-${SERVICE_ACCOUNT_NAME}-role"
+ local role_name="${1}"
if [[ ${#role_name} -gt 63 ]]; then
role_name=$(echo "$role_name" | head -c63)
gen3_log_warning "Role name has been truncated, due to amazon role name 64 character limit. New role name is $role_name"
@@ -123,8 +123,8 @@ function create_role(){
local assume_role_policy_path="$(create_assume_role_policy)"
gen3_log_info "Entering create_role"
- gen3_log_info " ${role_name}"
- gen3_log_info " ${assume_role_policy_path}"
+ gen3_log_info " Role: ${role_name}"
+ gen3_log_info " Policy path: ${assume_role_policy_path}"
local role_json
role_json=$(aws iam create-role \
@@ -156,8 +156,8 @@ function add_policy_to_role(){
local role_name="${2}"
gen3_log_info "Entering add_policy_to_role"
- gen3_log_info " ${policy}"
- gen3_log_info " ${role_name}"
+ gen3_log_info " Policy: ${policy}"
+ gen3_log_info " Role: ${role_name}"
local result
if [[ ${policy} =~ arn:aws:iam::aws:policy/[a-zA-Z0-9]+ ]]
@@ -198,8 +198,8 @@ function create_role_with_policy() {
local role_name="${2}"
gen3_log_info "Entering create_role_with_policy"
- gen3_log_info " ${policy}"
- gen3_log_info " ${role_name}"
+ gen3_log_info " Policy: ${policy}"
+ gen3_log_info " Role: ${role_name}"
local created_role_json
created_role_json="$(create_role ${role_name})" || return $?
@@ -357,7 +357,10 @@ function main() {
local policy_validation
local policy_source
- local role_name="${vpc_name}-${SERVICE_ACCOUNT_NAME}-role"
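+ # honor a role name supplied via --role-name; otherwise fall back to the generated default below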
+ local role_name=$ROLE_NAME
+ if [ -z "${role_name}" ]; then
+ role_name="${vpc_name}-${SERVICE_ACCOUNT_NAME}-role"
+ fi
if [ -z ${NAMESPACE_SCRIPT} ];
then
@@ -481,6 +484,12 @@ while getopts "$OPTSPEC" optchar; do
ACTION="c"
SERVICE_ACCOUNT_NAME=${OPTARG#*=}
;;
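+ # --role-name overrides the default "<vpc_name>-<service account>-role" naming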
+ role-name)
+ ROLE_NAME="${!OPTIND}"; OPTIND=$(( $OPTIND + 1 ))
+ ;;
+ role-name=*)
+ ROLE_NAME=${OPTARG#*=}
+ ;;
list)
ACTION="l"
SERVICE_ACCOUNT_NAME="${!OPTIND}"; OPTIND=$(( $OPTIND + 1 ))
diff --git a/gen3/bin/jupyter.sh b/gen3/bin/jupyter.sh
index b2b74e043..169ec59dc 100644
--- a/gen3/bin/jupyter.sh
+++ b/gen3/bin/jupyter.sh
@@ -241,8 +241,15 @@ gen3_jupyter_idle_pods() {
if jq -r --arg cluster "$clusterName" 'select(.cluster | startswith($cluster))' < "$tempClusterFile" | grep "$clusterName" > /dev/null; then
echo "$name"
if [[ "$command" == "kill" ]]; then
- gen3_log_info "try to kill pod $name in $jnamespace"
- g3kubectl delete pod --namespace "$jnamespace" "$name" 1>&2
+ pod_creation=$(date -d $(g3kubectl get pod "$name" -n "$jnamespace" -o jsonpath='{.metadata.creationTimestamp}') +%s)
+ current_time=$(date +%s)
+ age=$((current_time - pod_creation))
+
+ # potential workspaces to be reaped for inactivity must be at least 60 minutes old
+ if ((age >= 3600)); then
+ gen3_log_info "try to kill pod $name in $jnamespace"
+ g3kubectl delete pod --namespace "$jnamespace" "$name" 1>&2
+ fi
fi
else
gen3_log_info "$clusterName not in $(cat $tempClusterFile)"
diff --git a/gen3/bin/kube-roll-all.sh b/gen3/bin/kube-roll-all.sh
index d93ac7600..744e8e288 100644
--- a/gen3/bin/kube-roll-all.sh
+++ b/gen3/bin/kube-roll-all.sh
@@ -51,20 +51,20 @@ fi
gen3 kube-setup-networkpolicy disable
#
-# Hopefull core secrets/config in place - start bringing up services
+# Hopefully core secrets/config in place - start bringing up services
#
-if g3k_manifest_lookup .versions.indexd 2> /dev/null; then
- gen3 kube-setup-indexd &
-else
- gen3_log_info "no manifest entry for indexd"
-fi
-
if g3k_manifest_lookup .versions.arborist 2> /dev/null; then
gen3 kube-setup-arborist || gen3_log_err "arborist setup failed?"
else
gen3_log_info "no manifest entry for arborist"
fi
+if g3k_manifest_lookup .versions.indexd 2> /dev/null; then
+ gen3 kube-setup-indexd &
+else
+ gen3_log_info "no manifest entry for indexd"
+fi
+
if g3k_manifest_lookup '.versions["audit-service"]' 2> /dev/null; then
gen3 kube-setup-audit-service
else
@@ -243,18 +243,50 @@ else
gen3_log_info "not deploying dicom-viewer - no manifest entry for '.versions[\"dicom-viewer\"]'"
fi
+if g3k_manifest_lookup '.versions["gen3-discovery-ai"]' 2> /dev/null; then
+ gen3 kube-setup-gen3-discovery-ai &
+else
+ gen3_log_info "not deploying gen3-discovery-ai - no manifest entry for '.versions[\"gen3-discovery-ai\"]'"
+fi
+
+if g3k_manifest_lookup '.versions["ohdsi-atlas"]' && g3k_manifest_lookup '.versions["ohdsi-webapi"]' 2> /dev/null; then
+ gen3 kube-setup-ohdsi &
+else
+ gen3_log_info "not deploying OHDSI tools - no manifest entry for '.versions[\"ohdsi-atlas\"]' and '.versions[\"ohdsi-webapi\"]'"
+fi
+
+if g3k_manifest_lookup '.versions["cohort-middleware"]' 2> /dev/null; then
+ gen3 kube-setup-cohort-middleware
+else
+ gen3_log_info "not deploying cohort-middleware - no manifest entry for .versions[\"cohort-middleware\"]"
+fi
+
gen3 kube-setup-revproxy
if [[ "$GEN3_ROLL_FAST" != "true" ]]; then
+ if g3k_manifest_lookup .global.argocd 2> /dev/null; then
+ gen3 kube-setup-prometheus
+ fi
# Internal k8s systems
gen3 kube-setup-fluentd &
- gen3 kube-setup-autoscaler &
- gen3 kube-setup-kube-dns-autoscaler &
+ # If there is an entry for karpenter in the manifest, set up karpenter
+ if g3k_manifest_lookup .global.karpenter 2> /dev/null; then
+ if [[ "$(g3k_manifest_lookup .global.karpenter)" != "arm" ]]; then
+ gen3 kube-setup-karpenter deploy &
+ else
+ gen3 kube-setup-karpenter deploy --arm &
+ fi
+ # Otherwise, setup the cluster autoscaler
+ else
+ gen3 kube-setup-autoscaler &
+ fi
+ #gen3 kube-setup-kube-dns-autoscaler &
gen3 kube-setup-metrics deploy || true
gen3 kube-setup-tiller || true
#
gen3 kube-setup-networkpolicy disable &
gen3 kube-setup-networkpolicy &
+ gen3 kube-setup-pdb
else
gen3_log_info "roll fast mode - skipping k8s base services and netpolicy setup"
fi
@@ -320,18 +352,6 @@ else
gen3_log_info "not deploying argo-wrapper - no manifest entry for '.versions[\"argo-wrapper\"]'"
fi
-if g3k_manifest_lookup '.versions["cohort-middleware"]' 2> /dev/null; then
- gen3 roll cohort-middleware &
-else
- gen3_log_info "not deploying cohort-middleware - no manifest entry for '.versions[\"cohort-middleware\"]'"
-fi
-
-if g3k_manifest_lookup '.versions["ohdsi-atlas"]' && g3k_manifest_lookup '.versions["ohdsi-webapi"]' 2> /dev/null; then
- gen3 kube-setup-ohdsi &
-else
- gen3_log_info "not deploying OHDSI tools - no manifest entry for '.versions[\"ohdsi-atlas\"]' and '.versions[\"ohdsi-webapi\"]'"
-fi
-
gen3_log_info "enable network policy"
gen3 kube-setup-networkpolicy "enable" || true &
diff --git a/gen3/bin/kube-setup-access-backend.sh b/gen3/bin/kube-setup-access-backend.sh
index bbb3ae663..60d4758c5 100644
--- a/gen3/bin/kube-setup-access-backend.sh
+++ b/gen3/bin/kube-setup-access-backend.sh
@@ -210,8 +210,10 @@ authz:
- /programs/tutorial
- /programs/open_access
role_ids:
- - reader
- - storage_reader
+ - guppy_reader
+ - fence_reader
+ - peregrine_reader
+ - sheepdog_reader
- description: full access to indexd API
id: indexd_admin
resource_paths:
@@ -226,18 +228,22 @@ authz:
- /programs/open_access
role_ids:
- creator
- - reader
+ - guppy_reader
+ - fence_reader
+ - peregrine_reader
+ - sheepdog_reader
- updater
- deleter
- storage_writer
- - storage_reader
- description: ''
id: all_programs_reader
resource_paths:
- /programs
role_ids:
- - reader
- - storage_reader
+ - guppy_reader
+ - fence_reader
+ - peregrine_reader
+ - sheepdog_reader
- id: 'all_programs_writer'
description: ''
role_ids:
@@ -328,12 +334,37 @@ authz:
service: '*'
id: creator
- description: ''
- id: reader
+ id: guppy_reader
permissions:
- action:
method: read
- service: '*'
- id: reader
+ service: 'guppy'
+ id: guppy_reader
+ - description: ''
+ id: fence_reader
+ permissions:
+ - action:
+ method: read
+ service: 'fence'
+ id: fence_reader
+ - action:
+ method: read-storage
+ service: 'fence'
+ id: fence_storage_reader
+ - description: ''
+ id: peregrine_reader
+ permissions:
+ - action:
+ method: read
+ service: 'peregrine'
+ id: peregrine_reader
+ - description: ''
+ id: sheepdog_reader
+ permissions:
+ - action:
+ method: read
+ service: 'sheepdog'
+ id: sheepdog_reader
- description: ''
id: updater
permissions:
@@ -355,13 +386,6 @@ authz:
method: write-storage
service: '*'
id: storage_creator
- - description: ''
- id: storage_reader
- permissions:
- - action:
- method: read-storage
- service: '*'
- id: storage_reader
- id: mds_user
permissions:
- action:
diff --git a/gen3/bin/kube-setup-ambassador.sh b/gen3/bin/kube-setup-ambassador.sh
index 0f4e0be28..5f92af5cc 100644
--- a/gen3/bin/kube-setup-ambassador.sh
+++ b/gen3/bin/kube-setup-ambassador.sh
@@ -25,7 +25,6 @@ deploy_api_gateway() {
return 0
fi
gen3 roll ambassador-gen3
- g3k_kv_filter "${GEN3_HOME}/kube/services/ambassador-gen3/ambassador-gen3-service-elb.yaml" GEN3_ARN "$(g3kubectl get configmap global --output=jsonpath='{.data.revproxy_arn}')" | g3kubectl apply -f -
local luaYamlTemp="$(mktemp "$XDG_RUNTIME_DIR/lua.yaml.XXXXXX")"
cat - > "$luaYamlTemp" < /dev/null 2>&1
+ secrets=$(g3kubectl exec -c fence $(gen3 pod fence) -- fence-create client-create --client $fence_client --urls https://${hostname}/guac/guacamole/#/ --username guacamole --auto-approve --public --external --allowed-scopes openid profile email user | tail -1)
+ if [[ ! $secrets =~ (\'(.*)\', None) ]]; then
+ gen3_log_err "kube-setup-apache-guacamole" "Failed generating oidc client for guacamole: $secrets"
+ return 1
+ fi
+ fi
+ local FENCE_CLIENT_ID="${BASH_REMATCH[2]}"
+ local FENCE_CLIENT_SECRET="${BASH_REMATCH[3]}"
+ gen3_log_info "create guacamole-secret"
+ mkdir -m 0700 -p "$(gen3_secrets_folder)/g3auto/guacamole"
+
+ cat - < /dev/null 2>&1; then
+ local credsPath="$(gen3_secrets_folder)/g3auto/guacamole/appcreds.json"
+ if [ -f "$credsPath" ]; then
+ gen3 secrets sync
+ return 0
+ fi
+ mkdir -p "$(dirname "$credsPath")"
+ if ! new_client > "$credsPath"; then
+ gen3_log_err "Failed to setup guacamole fence client"
+ rm "$credsPath" || true
+ return 1
+ fi
+ gen3 secrets sync
+ fi
+
+ if ! g3kubectl describe secret guacamole-g3auto | grep dbcreds.json > /dev/null 2>&1; then
+ gen3_log_info "create database"
+ if ! gen3 db setup guacamole; then
+ gen3_log_err "Failed setting up database for guacamole service"
+ return 1
+ fi
+ gen3 secrets sync
+ fi
+}
+
+setup_secrets() {
+ # guacamole-secrets.yaml populate and apply.
+ gen3_log_info "Deploying secrets for guacamole"
+ # subshell
+
+ (
+ if ! dbcreds="$(gen3 db creds guacamole)"; then
+ gen3_log_err "unable to find db creds for guacamole service"
+ return 1
+ fi
+
+ if ! appcreds="$(gen3 secrets decode guacamole-g3auto appcreds.json)"; then
+ gen3_log_err "unable to find app creds for guacamole service"
+ return 1
+ fi
+
+ local hostname=$(gen3 api hostname)
+ export DB_NAME=$(jq -r ".db_database" <<< "$dbcreds")
+ export DB_USER=$(jq -r ".db_username" <<< "$dbcreds")
+ export DB_PASS=$(jq -r ".db_password" <<< "$dbcreds")
+ export DB_HOST=$(jq -r ".db_host" <<< "$dbcreds")
+
+ export FENCE_URL="https://${hostname}/user/user"
+ export FENCE_METADATA_URL="https://${hostname}/.well-known/openid-configuration"
+ export FENCE_CLIENT_ID=$(jq -r ".FENCE_CLIENT_ID" <<< "$appcreds")
+ export FENCE_CLIENT_SECRET=$(jq -r ".FENCE_CLIENT_SECRET" <<< "$appcreds")
+
+ export OPENID_AUTHORIZATION_ENDPOINT="https://${hostname}/user/oauth2/authorize"
+ export OPENID_JWKS_ENDPOINT="https://${hostname}/user/.well-known/jwks"
+ export OPENID_REDIRECT_URI="https://${hostname}/guac/guacamole/#/"
+ export OPENID_ISSUER="https://${hostname}/user"
+ export OPENID_USERNAME_CLAIM_TYPE="sub"
+ export OPENID_SCOPE="openid profile email"
+
+ envsubst <"${GEN3_HOME}/kube/services/apache-guacamole/apache-guacamole-configmap.yaml" | g3kubectl apply -f -
+ envsubst <"${GEN3_HOME}/kube/services/apache-guacamole/apache-guacamole-secret.yaml" | g3kubectl apply -f -
+ )
+}
+
+# main --------------------------------------
+if [[ $# -gt 0 && "$1" == "new-client" ]]; then
+ new_client
+ exit $?
+fi
+
+setup_creds
+
+setup_secrets
+
+gen3 roll apache-guacamole
+g3kubectl apply -f "${GEN3_HOME}/kube/services/apache-guacamole/apache-guacamole-service.yaml"
+
+cat < /dev/null 2>&1; then
+ gen3_log_info "Creating argo-events namespace, as it was not found"
+ kubectl create namespace argo-events
+fi
+
+# Check if target configmap exists
+if ! kubectl get configmap environment -n argo-events > /dev/null 2>&1; then
+
+ # Get value from source configmap
+ VALUE=$(kubectl get configmap global -n default -o jsonpath="{.data.environment}")
+
+ # Create target configmap
+ kubectl create configmap environment -n argo-events --from-literal=environment=$VALUE
+
+fi
+
+if [[ "$ctxNamespace" == "default" || "$ctxNamespace" == "null" || "$override_namespace" == true ]]; then
+ if (! helm status argo -n argo-events > /dev/null 2>&1 ) || [[ "$force" == true ]]; then
+ helm repo add argo https://argoproj.github.io/argo-helm --force-update 2> >(grep -v 'This is insecure' >&2)
+ helm repo update 2> >(grep -v 'This is insecure' >&2)
+ helm upgrade --install argo-events argo/argo-events -n argo-events --version "2.1.3"
+ else
+ gen3_log_info "argo-events Helm chart already installed. To force reinstall, run with --force"
+ fi
+
+ if kubectl get statefulset eventbus-default-stan -n argo-events >/dev/null 2>&1; then
+ gen3_log_info "Detected eventbus installation. To reinstall, please delete the eventbus first. You will need to delete any EventSource and Sensors currently in use"
+ else
+ kubectl apply -f ${GEN3_HOME}/kube/services/argo-events/eventbus.yaml
+ fi
+else
+ gen3_log_info "Not running in default namespace, will not install argo-events helm chart. This behavior can be overwritten with the --override-namespace flag"
+fi
+
+if [[ "$create_workflow_resources" == true ]]; then
+ for file in ${GEN3_HOME}/kube/services/argo-events/workflows/*.yaml; do
+ kubectl apply -f "$file"
+ done
+
+ # Creating rolebindings to allow Argo Events to create jobs, and allow those jobs to manage Karpenter resources
+ kubectl create rolebinding argo-events-job-admin-binding --role=job-admin --serviceaccount=argo-events:default --namespace=argo-events
+ kubectl create clusterrolebinding karpenter-admin-binding --clusterrole=karpenter-admin --serviceaccount=argo-events:default
+ kubectl create clusterrolebinding argo-workflows-view-binding --clusterrole=argo-argo-workflows-view --serviceaccount=argo-events:default
+fi
\ No newline at end of file
diff --git a/gen3/bin/kube-setup-argo-wrapper.sh b/gen3/bin/kube-setup-argo-wrapper.sh
index 5727a703e..9f7cc52ce 100644
--- a/gen3/bin/kube-setup-argo-wrapper.sh
+++ b/gen3/bin/kube-setup-argo-wrapper.sh
@@ -18,6 +18,26 @@ if [[ -z "$GEN3_SOURCE_ONLY" ]]; then
gen3 roll argo-wrapper
g3kubectl apply -f "${GEN3_HOME}/kube/services/argo-wrapper/argo-wrapper-service.yaml"
+
+
+ if g3k_manifest_lookup .argo.argo_server_service_url 2> /dev/null; then
+ export ARGO_HOST=$(g3k_manifest_lookup .argo.argo_server_service_url)
+ else
+ export ARGO_HOST="http://argo-argo-workflows-server.argo.svc.cluster.local:2746"
+ fi
+
+ if g3k_config_lookup '.argo_namespace' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json 2> /dev/null; then
+ export ARGO_NAMESPACE=$(g3k_config_lookup '.argo_namespace' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json)
+ else
+ export ARGO_NAMESPACE="argo"
+ fi
+
+ envsubst <"${GEN3_HOME}/kube/services/argo-wrapper/config.ini" > /tmp/config.ini
+
+ g3kubectl delete configmap argo-wrapper-namespace-config
+ g3kubectl create configmap argo-wrapper-namespace-config --from-file /tmp/config.ini
+
+ rm /tmp/config.ini
gen3_log_info "the argo-wrapper service has been deployed onto the kubernetes cluster"
-fi
\ No newline at end of file
+fi
diff --git a/gen3/bin/kube-setup-argo.sh b/gen3/bin/kube-setup-argo.sh
index e95f216fe..4c6c55eee 100644
--- a/gen3/bin/kube-setup-argo.sh
+++ b/gen3/bin/kube-setup-argo.sh
@@ -5,36 +5,55 @@ source "${GEN3_HOME}/gen3/lib/utils.sh"
gen3_load "gen3/gen3setup"
gen3_load "gen3/lib/kube-setup-init"
+override_namespace=false
+force=false
+
+for arg in "${@}"; do
+ if [ "$arg" == "--override-namespace" ]; then
+ override_namespace=true
+ elif [ "$arg" == "--force" ]; then
+ force=true
+ else
+ # Print usage info and exit
+ gen3_log_info "Usage: gen3 kube-setup-argo [--override-namespace] [--force]"
+ exit 1
+ fi
+done
ctx="$(g3kubectl config current-context)"
ctxNamespace="$(g3kubectl config view -ojson | jq -r ".contexts | map(select(.name==\"$ctx\")) | .[0] | .context.namespace")"
+argo_namespace=$(g3k_config_lookup '.argo_namespace' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json)
function setup_argo_buckets {
local accountNumber
local environment
local policyFile="$XDG_RUNTIME_DIR/policy_$$.json"
+ local bucketLifecyclePolicyFile="$XDG_RUNTIME_DIR/bucket_lifecycle_policy_$$.json"
if ! accountNumber="$(aws sts get-caller-identity --output text --query 'Account')"; then
gen3_log_err "could not determine account numer"
return 1
fi
- if ! environment="$(g3kubectl get configmap manifest-global -o json | jq -r .data.environment)"; then
+ if ! environment="$(g3k_environment)"; then
gen3_log_err "could not determine environment from manifest-global - bailing out of argo setup"
return 1
fi
# try to come up with a unique but composable bucket name
bucketName="gen3-argo-${accountNumber}-${environment//_/-}"
- userName="gen3-argo-${environment//_/-}-user"
- if [[ ! -z $(g3k_config_lookup '."s3-bucket"' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json) || ! -z $(g3k_config_lookup '.argo."s3-bucket"') ]]; then
- if [[ ! -z $(g3k_config_lookup '."s3-bucket"' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json) ]]; then
+ nameSpace="$(gen3 db namespace)"
+ roleName="gen3-argo-${environment//_/-}-role"
+ bucketPolicy="argo-bucket-policy-${nameSpace}"
+ internalBucketPolicy="argo-internal-bucket-policy-${nameSpace}"
+ if [[ ! -z $(g3k_config_lookup '."downloadable-s3-bucket"' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json) || ! -z $(g3k_config_lookup '.argo."downloadable-s3-bucket"') ]]; then
+ if [[ ! -z $(g3k_config_lookup '."downloadable-s3-bucket"' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json) ]]; then
gen3_log_info "Using S3 bucket found in manifest: ${bucketName}"
- bucketName=$(g3k_config_lookup '."s3-bucket"' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json)
+ bucketName=$(g3k_config_lookup '."downloadable-s3-bucket"' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json)
else
gen3_log_info "Using S3 bucket found in manifest: ${bucketName}"
- bucketName=$(g3k_config_lookup '.argo."s3-bucket"')
+ bucketName=$(g3k_config_lookup '.argo."downloadable-s3-bucket"')
fi
fi
if [[ ! -z $(g3k_config_lookup '."internal-s3-bucket"' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json) || ! -z $(g3k_config_lookup '.argo."internal-s3-bucket"') ]]; then
@@ -98,65 +117,70 @@ EOF
]
}
EOF
- if ! secret="$(g3kubectl get secret argo-s3-creds -n argo 2> /dev/null)"; then
- gen3_log_info "setting up bucket $bucketName"
-
- if aws s3 ls --page-size 1 "s3://${bucketName}" > /dev/null 2>&1; then
- gen3_log_info "${bucketName} s3 bucket already exists"
- # continue on ...
- elif ! aws s3 mb "s3://${bucketName}"; then
- gen3_log_err "failed to create bucket ${bucketName}"
- fi
-
-
- gen3_log_info "Creating IAM user ${userName}"
- if ! aws iam get-user --user-name ${userName} > /dev/null 2>&1; then
- aws iam create-user --user-name ${userName}
- else
- gen3_log_info "IAM user ${userName} already exits.."
- fi
-
- secret=$(aws iam create-access-key --user-name ${userName})
- if ! g3kubectl get namespace argo > /dev/null 2>&1; then
- gen3_log_info "Creating argo namespace"
- g3kubectl create namespace argo
- g3kubectl label namespace argo app=argo
- g3kubectl create rolebinding argo-admin --clusterrole=admin --serviceaccount=argo:default -n argo
- fi
- else
- # Else we want to recreate the argo-s3-creds secret so make a temp file with the current creds and delete argo-s3-creds secret
- gen3_log_info "Argo S3 setup already completed"
- local secretFile="$XDG_RUNTIME_DIR/temp_key_file_$$.json"
- cat > "$secretFile" < "$bucketLifecyclePolicyFile" < /dev/null 2>&1; then
+ gen3_log_info "${bucketName} s3 bucket already exists"
+ # continue on ...
+ elif ! aws s3 mb "s3://${bucketName}"; then
+ gen3_log_err "failed to create bucket ${bucketName}"
fi
-
-
- gen3_log_info "Creating s3 creds secret in argo namespace"
- if [[ -z $internalBucketName ]]; then
- g3kubectl create secret -n argo generic argo-s3-creds --from-literal=AccessKeyId=$(echo $secret | jq -r .AccessKey.AccessKeyId) --from-literal=SecretAccessKey=$(echo $secret | jq -r .AccessKey.SecretAccessKey) --from-literal=bucketname=${bucketName}
+ if ! g3kubectl get namespace argo > /dev/null 2>&1; then
+ gen3_log_info "Creating argo namespace"
+ g3kubectl create namespace argo || true
+ g3kubectl label namespace argo app=argo || true
+ # Grant admin access within the argo namespace to the default SA in the argo namespace
+ g3kubectl create rolebinding argo-admin --clusterrole=admin --serviceaccount=argo:default -n $argo_namespace || true
+ fi
+ gen3_log_info "Creating IAM role ${roleName}"
+ if aws iam get-role --role-name "${roleName}" > /dev/null 2>&1; then
+ gen3_log_info "IAM role ${roleName} already exists.."
+ roleArn=$(aws iam get-role --role-name "${roleName}" --query 'Role.Arn' --output text)
+ gen3_log_info "Role annotate"
+ g3kubectl annotate serviceaccount default eks.amazonaws.com/role-arn=${roleArn} --overwrite -n $argo_namespace
+ g3kubectl annotate serviceaccount argo eks.amazonaws.com/role-arn=${roleArn} --overwrite -n $nameSpace
else
- g3kubectl create secret -n argo generic argo-s3-creds --from-literal=AccessKeyId=$(echo $secret | jq -r .AccessKey.AccessKeyId) --from-literal=SecretAccessKey=$(echo $secret | jq -r .AccessKey.SecretAccessKey) --from-literal=bucketname=${bucketName} --from-literal=internalbucketname=${internalBucketName}
+ gen3 awsrole create $roleName argo $nameSpace -all_namespaces
+ roleArn=$(aws iam get-role --role-name "${roleName}" --query 'Role.Arn' --output text)
+ g3kubectl annotate serviceaccount default eks.amazonaws.com/role-arn=${roleArn} -n $argo_namespace
fi
+ # Grant admin access within the current namespace to the argo SA in the current namespace
+ g3kubectl create rolebinding argo-admin --clusterrole=admin --serviceaccount=$nameSpace:argo -n $nameSpace || true
+ aws iam put-role-policy --role-name ${roleName} --policy-name ${bucketPolicy} --policy-document file://$policyFile || true
+ if [[ -z $internalBucketName ]]; then
+ aws iam put-role-policy --role-name ${roleName} --policy-name ${internalBucketPolicy} --policy-document file://$internalBucketPolicyFile || true
+ fi
## if new bucket then do the following
# Get the aws keys from secret
+ # Create and attach lifecycle policy
# Set bucket policies
# Update secret to have new bucket
+ gen3_log_info "Creating bucket lifecycle policy"
+ aws s3api put-bucket-lifecycle --bucket ${bucketName} --lifecycle-configuration file://$bucketLifecyclePolicyFile
+
# Always update the policy, in case manifest buckets change
- aws iam put-user-policy --user-name ${userName} --policy-name argo-bucket-policy --policy-document file://$policyFile
+ aws iam put-role-policy --role-name ${roleName} --policy-name ${bucketPolicy} --policy-document file://$policyFile
if [[ ! -z $internalBucketPolicyFile ]]; then
- aws iam put-user-policy --user-name ${userName} --policy-name argo-internal-bucket-policy --policy-document file://$internalBucketPolicyFile
+ aws iam put-role-policy --role-name ${roleName} --policy-name ${internalBucketPolicy} --policy-document file://$internalBucketPolicyFile
fi
if [[ ! -z $(g3k_config_lookup '.indexd_admin_user' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json) || ! -z $(g3k_config_lookup '.argo.indexd_admin_user') ]]; then
if [[ ! -z $(g3k_config_lookup '.indexd_admin_user' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json) ]]; then
@@ -168,39 +192,53 @@ EOF
for serviceName in indexd; do
secretName="${serviceName}-creds"
# Only delete if secret is found to prevent early exits
- if [[ ! -z $(g3kubectl get secrets -n argo | grep $secretName) ]]; then
- g3kubectl delete secret "$secretName" -n argo > /dev/null 2>&1
+ if [[ ! -z $(g3kubectl get secrets -n $argo_namespace | grep $secretName) ]]; then
+ g3kubectl delete secret "$secretName" -n $argo_namespace > /dev/null 2>&1
fi
done
sleep 1 # I think delete is async - give backend a second to finish
indexdFencePassword=$(cat $(gen3_secrets_folder)/creds.json | jq -r .indexd.user_db.$indexd_admin_user)
- g3kubectl create secret generic "indexd-creds" --from-literal=user=$indexd_admin_user --from-literal=password=$indexdFencePassword -n argo
+ g3kubectl create secret generic "indexd-creds" --from-literal=user=$indexd_admin_user --from-literal=password=$indexdFencePassword -n $argo_namespace
fi
}
function setup_argo_db() {
- if ! secret="$(g3kubectl get secret argo-db-creds -n argo 2> /dev/null)"; then
+ if ! secret="$(g3kubectl get secret argo-db-creds -n $argo_namespace 2> /dev/null)"; then
gen3_log_info "Setting up argo db persistence"
gen3 db setup argo || true
dbCreds=$(gen3 secrets decode argo-g3auto dbcreds.json)
- g3kubectl create secret -n argo generic argo-db-creds --from-literal=db_host=$(echo $dbCreds | jq -r .db_host) --from-literal=db_username=$(echo $dbCreds | jq -r .db_username) --from-literal=db_password=$(echo $dbCreds | jq -r .db_password) --from-literal=db_database=$(echo $dbCreds | jq -r .db_database)
+ g3kubectl create secret -n $argo_namespace generic argo-db-creds --from-literal=db_host=$(echo $dbCreds | jq -r .db_host) --from-literal=db_username=$(echo $dbCreds | jq -r .db_username) --from-literal=db_password=$(echo $dbCreds | jq -r .db_password) --from-literal=db_database=$(echo $dbCreds | jq -r .db_database)
else
gen3_log_info "Argo DB setup already completed"
fi
}
+function setup_argo_template_secret() {
+ gen3_log_info "Started the template secret process"
+ downloadable_bucket_name=$(g3k_config_lookup '."downloadable-s3-bucket"' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json)
+ # Check if the secret already exists
+ if [[ ! -z $(g3kubectl get secret argo-template-values-secret -n $argo_namespace) ]]; then
+ gen3_log_info "Argo template values secret already exists, assuming it's stale and deleting"
+ g3kubectl delete secret argo-template-values-secret -n $argo_namespace
+ fi
+ gen3_log_info "Creating argo template values secret"
+ g3kubectl create secret generic argo-template-values-secret --from-literal=DOWNLOADABLE_BUCKET=$downloadable_bucket_name -n $argo_namespace
+}
+
+setup_argo_buckets
# only do this if we are running in the default namespace
-if [[ "$ctxNamespace" == "default" || "$ctxNamespace" == "null" ]]; then
- setup_argo_buckets
+if [[ "$ctxNamespace" == "default" || "$ctxNamespace" == "null" || "$override_namespace" == true ]]; then
setup_argo_db
- if (! helm status argo -n argo > /dev/null 2>&1 ) || [[ "$1" == "--force" ]]; then
- DBHOST=$(kubectl get secrets -n argo argo-db-creds -o json | jq -r .data.db_host | base64 -d)
- DBNAME=$(kubectl get secrets -n argo argo-db-creds -o json | jq -r .data.db_database | base64 -d)
- if [[ -z $(kubectl get secrets -n argo argo-s3-creds -o json | jq -r .data.internalbucketname | base64 -d) ]]; then
- BUCKET=$(kubectl get secrets -n argo argo-s3-creds -o json | jq -r .data.bucketname | base64 -d)
+ setup_argo_template_secret
+ if (! helm status argo -n $argo_namespace > /dev/null 2>&1 ) || [[ "$force" == true ]]; then
+ DBHOST=$(kubectl get secrets -n $argo_namespace argo-db-creds -o json | jq -r .data.db_host | base64 -d)
+ DBNAME=$(kubectl get secrets -n $argo_namespace argo-db-creds -o json | jq -r .data.db_database | base64 -d)
+ if [[ -z $internalBucketName ]]; then
+ BUCKET=$bucketName
else
- BUCKET=$(kubectl get secrets -n argo argo-s3-creds -o json | jq -r .data.internalbucketname | base64 -d)
+ BUCKET=$internalBucketName
fi
+
valuesFile="$XDG_RUNTIME_DIR/values_$$.yaml"
valuesTemplate="${GEN3_HOME}/kube/services/argo/values.yaml"
@@ -208,10 +246,10 @@ if [[ "$ctxNamespace" == "default" || "$ctxNamespace" == "null" ]]; then
helm repo add argo https://argoproj.github.io/argo-helm --force-update 2> >(grep -v 'This is insecure' >&2)
helm repo update 2> >(grep -v 'This is insecure' >&2)
- helm upgrade --install argo argo/argo-workflows -n argo -f ${valuesFile}
+ helm upgrade --install argo argo/argo-workflows -n $argo_namespace -f ${valuesFile} --version 0.29.1
else
gen3_log_info "kube-setup-argo exiting - argo already deployed, use --force to redeploy"
fi
else
gen3_log_info "kube-setup-argo exiting - only deploys from default namespace"
-fi
+fi
\ No newline at end of file
diff --git a/gen3/bin/kube-setup-argocd.sh b/gen3/bin/kube-setup-argocd.sh
new file mode 100644
index 000000000..4a9ac0f74
--- /dev/null
+++ b/gen3/bin/kube-setup-argocd.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# Deploy the argocd
+#
+
+source "${GEN3_HOME}/gen3/lib/utils.sh"
+gen3_load "gen3/gen3setup"
+
+if g3kubectl get namespace argocd > /dev/null 2>&1;
+then
+ gen3_log_info "ArgoCD is already deployed. Skipping..."
+else
+ kubectl create namespace argocd
+ kubectl label namespace argocd app="argocd"
+ kubectl annotate namespace argocd app="argocd"
+ helm repo add argo https://argoproj.github.io/argo-helm
+ helm upgrade --install argocd -f "$GEN3_HOME/kube/services/argocd/values.yaml" argo/argo-cd -n argocd
+ gen3 kube-setup-revproxy
+ export argocdsecret=`kubectl get secret argocd-initial-admin-secret -n argocd -o json | jq .data.password -r | base64 -d` # pragma: allowlist secret
+ gen3_log_info "You can now access the ArgoCD endpoint with the following credentials: Username= admin and Password= $argocdsecret"
+fi
\ No newline at end of file
diff --git a/gen3/bin/kube-setup-audit-service.sh b/gen3/bin/kube-setup-audit-service.sh
index 2eebe0f97..92c70f352 100644
--- a/gen3/bin/kube-setup-audit-service.sh
+++ b/gen3/bin/kube-setup-audit-service.sh
@@ -21,7 +21,7 @@ setup_database_and_config() {
# Setup config file that audit-service consumes
local secretsFolder="$(gen3_secrets_folder)/g3auto/audit"
- if [[ ! -f "$secretsFolder/audit-service-config.yaml" || ! -f "$secretsFolder/base64Authz.txt" ]]; then
+ if [[ ! -f "$secretsFolder/audit-service-config.yaml" ]]; then
if [[ ! -f "$secretsFolder/dbcreds.json" ]]; then
if ! gen3 db setup audit; then
gen3_log_err "Failed setting up database for audit-service"
@@ -60,14 +60,12 @@ DB_USER: $(jq -r .db_username < "$secretsFolder/dbcreds.json")
DB_PASSWORD: $(jq -r .db_password < "$secretsFolder/dbcreds.json")
DB_DATABASE: $(jq -r .db_database < "$secretsFolder/dbcreds.json")
EOM
- # make it easy for nginx to get the Authorization header ...
- # echo -n "gateway:$password" | base64 > "$secretsFolder/base64Authz.txt"
fi
gen3 secrets sync 'setup audit-g3auto secrets'
}
setup_audit_sqs() {
- local sqsName="$(gen3 api safe-name audit-sqs)"
+ local sqsName="audit-sqs"
sqsInfo="$(gen3 sqs create-queue-if-not-exist $sqsName)" || exit 1
sqsUrl="$(jq -e -r '.["url"]' <<< "$sqsInfo")" || { echo "Cannot get 'sqs-url' from output: $sqsInfo"; exit 1; }
sqsArn="$(jq -e -r '.["arn"]' <<< "$sqsInfo")" || { echo "Cannot get 'sqs-arn' from output: $sqsInfo"; exit 1; }
diff --git a/gen3/bin/kube-setup-aurora-monitoring.sh b/gen3/bin/kube-setup-aurora-monitoring.sh
new file mode 100644
index 000000000..5029a87ca
--- /dev/null
+++ b/gen3/bin/kube-setup-aurora-monitoring.sh
@@ -0,0 +1,167 @@
+source "${GEN3_HOME}/gen3/lib/utils.sh"
+gen3_load "gen3/gen3setup"
+
+databaseArray=()
+databaseFarmArray=()
+
+# This function retrieves all the top-level entries from creds.json that have the db items we want.
+# This way, we can use this information while we're creating schemas and the like.
+get_all_dbs() {
+ databases=$(jq 'to_entries[] | select (.value.db_password) | .key' $(gen3_secrets_folder)/creds.json)
+
+ OLD_IFS=$IFS
+ IFS=$'\n' databaseArray=($databases)
+ IFS=$OLD_IFS
+}
+
+get_all_dbs_db_farm() {
+ databases=$(jq 'to_entries[] | .key' $(gen3_secrets_folder)/g3auto/dbfarm/servers.json)
+
+ OLD_IFS=$IFS
+ IFS=$'\n' databaseFarmArray=($databases)
+ IFS=$OLD_IFS
+}
+
+create_new_datadog_user() {
+ # Generate a new password for the datadog user in psql
+ datadogPsqlPassword=$(random_alphanumeric)
+
+ # update creds.json
+ if [ ! -d "$(gen3_secrets_folder)/datadog" ]
+ then
+ mkdir "$(gen3_secrets_folder)/datadog"
+ fi
+
+ if [ ! -s "$(gen3_secrets_folder)/datadog/datadog_db_users" ]
+ then
+ echo "{}" > "$(gen3_secrets_folder)/datadog/datadog_db_users.json"
+ fi
+
+ output=$(jq --arg host "$1" --arg password "$datadogPsqlPassword" '.[$host].datadog_db_password=$password' "$(gen3_secrets_folder)/datadog/datadog_db_users.json")
+ echo "$output" > "$(gen3_secrets_folder)/datadog/datadog_db_users.json"
+
+ username=$(jq --arg host "$1" 'map(select(.db_host==$host))[0] | .db_username' $(gen3_secrets_folder)/g3auto/dbfarm/servers.json | tr -d '"')
+ password=$(jq --arg host "$1" 'map(select(.db_host==$host))[0] | .db_password' $(gen3_secrets_folder)/g3auto/dbfarm/servers.json | tr -d '"')
+
+ # Create the Datadog user in the database
+ if PGPASSWORD=$password psql -h "$1" -U "$username" -c "SELECT 1 FROM pg_roles WHERE rolname='datadog'" | grep -q 1;
+ then
+ PGPASSWORD=$password psql -h "$1" -U "$username" -c "ALTER USER datadog WITH password '$datadogPsqlPassword';"
+ else
+ PGPASSWORD=$password psql -h "$1" -U "$username" -c "CREATE USER datadog WITH password '$datadogPsqlPassword';"
+ fi
+
+ echo $datadogPsqlPassword
+}
+
+get_datadog_db_password() {
+ # Create the Datadog user
+ datadogPsqlPassword="$(jq --arg host "$1" '.[$host].datadog_db_password' < $(gen3_secrets_folder)/datadog/datadog_db_users.json)"
+ if [[ -z "$datadogPsqlPassword" ]]
+ then
+ datadogPsqlPassword=$(create_new_datadog_user $1)
+ fi
+
+ echo $datadogPsqlPassword
+}
+
+create_schema_and_function() {
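+ # resolve the service's db host/database from creds.json and the matching admin credentials from the dbfarm servers.json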
+ svc=$(echo $1 | tr -d '"')
+ host=$(jq --arg service "$svc" '.[$service].db_host' $(gen3_secrets_folder)/creds.json | tr -d '"')
+ database=$(jq --arg service "$svc" '.[$service].db_database' $(gen3_secrets_folder)/creds.json | tr -d '"')
+
+ username=$(jq --arg host "$host" 'map(select(.db_host==$host))[0] | .db_username' $(gen3_secrets_folder)/g3auto/dbfarm/servers.json | tr -d '"')
+ password=$(jq --arg host "$host" 'map(select(.db_host==$host))[0] | .db_password' $(gen3_secrets_folder)/g3auto/dbfarm/servers.json | tr -d '"')
+
+ ddPass=$(get_datadog_db_password $host)
+
+ PGPASSWORD=$password psql -h $host -U $username -d $database -t < /dev/null
+then
+ gen3_log_info "We detected an ArgoCD application named 'datadog-application,' so we're modifying that"
+
+ patch=$(yq -n --yaml-output --arg confd "$confd" '.spec.source.helm.values = $confd')
+
+ echo "$patch" > /tmp/confd.yaml
+
+ kubectl patch applications.argoproj.io datadog-application --type merge -n argocd --patch-file /tmp/confd.yaml
+
+else
+ gen3_log_info "We didn't detect an ArgoCD application named 'datadog-application,' so we're going to reinstall the DD Helm chart"
+
+ (cat kube/services/datadog/values.yaml | yq --arg endpoints "$postgresString" --yaml-output '.clusterAgent.confd."postgres.yaml" = $endpoints | .clusterChecksRunner.enabled = true') > $(gen3_secrets_folder)/datadog/datadog_values.yaml
+ helm repo add datadog https://helm.datadoghq.com --force-update 2> >(grep -v 'This is insecure' >&2)
+ helm repo update 2> >(grep -v 'This is insecure' >&2)
+ helm upgrade --install datadog -f "$(gen3_secrets_folder)/datadog/datadog_values.yaml" datadog/datadog -n datadog --version 3.6.4 2> >(grep -v 'This is insecure' >&2)
+fi
\ No newline at end of file
diff --git a/gen3/bin/kube-setup-autoscaler-for-large-workflows.sh b/gen3/bin/kube-setup-autoscaler-for-large-workflows.sh
new file mode 100644
index 000000000..5bf4df8b7
--- /dev/null
+++ b/gen3/bin/kube-setup-autoscaler-for-large-workflows.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+# Set the resources block for the deployment
+kubectl patch deployment cluster-autoscaler -n kube-system --type=json -p='[{"op": "replace", "path": "/spec/template/spec/containers/0/resources", "value": {"limits":{"cpu":"6","memory":"30Gi"},"requests":{"cpu":"1","memory":"4Gi"}}}]'
+
+# Add options to the command for the container, if they are not already present
+if ! kubectl get deployment cluster-autoscaler -n kube-system -o jsonpath='{.spec.template.spec.containers[0].command}' | yq eval '.[]' | grep -q -- '--scale-down-delay-after-delete=2m'; then
+ kubectl patch deployment cluster-autoscaler -n kube-system --type=json -p='[{"op": "add", "path": "/spec/template/spec/containers/0/command/-", "value": "--scale-down-delay-after-delete=2m"}]'
+else
+ echo "Flag --scale-down-delay-after-delete=2m already present"
+fi
+
+if ! kubectl get deployment cluster-autoscaler -n kube-system -o jsonpath='{.spec.template.spec.containers[0].command}' | yq eval '.[]' | grep -q -- '--scale-down-unneeded-time=2m'; then
+ kubectl patch deployment cluster-autoscaler -n kube-system --type=json -p='[{"op": "add", "path": "/spec/template/spec/containers/0/command/-", "value": "--scale-down-unneeded-time=2m"}]'
+else
+ echo "Flag --scale-down-unneeded-time=2m already present"
+fi
+
+if ! kubectl get deployment cluster-autoscaler -n kube-system -o jsonpath='{.spec.template.spec.containers[0].command}' | yq eval '.[]' | grep -q -- '--scan-interval=60s'; then
+ kubectl patch deployment cluster-autoscaler -n kube-system --type=json -p='[{"op": "add", "path": "/spec/template/spec/containers/0/command/-", "value": "--scan-interval=60s"}]'
+else
+ echo "Flag --scan-interval=60s already present"
+fi
+
+# Add PriorityClass to the pod
+kubectl patch deployment cluster-autoscaler -n kube-system --type=json -p='[{"op": "add", "path": "/spec/template/spec/priorityClassName", "value": "system-node-critical"}]'
diff --git a/gen3/bin/kube-setup-autoscaler.sh b/gen3/bin/kube-setup-autoscaler.sh
index 01a6cdd95..8aeff8b5b 100644
--- a/gen3/bin/kube-setup-autoscaler.sh
+++ b/gen3/bin/kube-setup-autoscaler.sh
@@ -1,6 +1,6 @@
#!/bin/bash
#
-# cluster-autoscaler allow a kubernetes cluste scale out or in depending on the
+# cluster-autoscaler allows a kubernetes cluster to scale out or in depending on the
# specification set in deployment. It'll talk to the ASG where the worker nodes are
# and send a signal to add or remove instances based upon requirements.
#
@@ -11,6 +11,9 @@
source "${GEN3_HOME}/gen3/lib/utils.sh"
gen3_load "gen3/lib/kube-setup-init"
+ctx="$(g3kubectl config current-context)"
+ctxNamespace="$(g3kubectl config view -ojson | jq -r ".contexts | map(select(.name==\"$ctx\")) | .[0] | .context.namespace")"
+
if [[ -n "$JENKINS_HOME" ]]; then
echo "Jenkins skipping fluentd setup: $JENKINS_HOME"
exit 0
@@ -30,6 +33,9 @@ function get_autoscaler_version(){
local casv
case ${k8s_version} in
+ "1.22+")
+ casv="v1.22.2"
+ ;;
"1.21+")
casv="v1.21.2"
;;
@@ -66,34 +72,52 @@ function get_autoscaler_version(){
function deploy() {
+ if [["$ctxNamespace" == "default" || "$ctxNamespace" == "null"]]; then
+ if (! g3kubectl --namespace=kube-system get deployment cluster-autoscaler > /dev/null 2>&1 || "${FORCE}" == true); then
+ if ! [ -z ${CAS_VERSION} ];
+ then
+ casv=${CAS_VERSION}
+ else
+ casv="$(get_autoscaler_version)" # cas stands for ClusterAutoScaler
+ fi
+ echo "Deploying cluster autoscaler ${casv} in ${vpc_name}"
+ g3k_kv_filter "${GEN3_HOME}/kube/services/autoscaler/cluster-autoscaler-autodiscover.yaml" VPC_NAME "${vpc_name}" CAS_VERSION ${casv} | g3kubectl "--namespace=kube-system" apply -f -
+ else
+ echo "kube-setup-autoscaler exiting - cluster-autoscaler already deployed, use --force to redeploy"
+ fi
+ fi
+}
+
+function remove() {
- if (! g3kubectl --namespace=kube-system get deployment cluster-autoscaler > /dev/null 2>&1) || [[ "$FORCE" == true ]]; then
+ if ( g3kubectl --namespace=kube-system get deployment cluster-autoscaler > /dev/null 2>&1); then
if ! [ -z ${CAS_VERSION} ];
then
casv=${CAS_VERSION}
else
casv="$(get_autoscaler_version)" # cas stands for ClusterAutoScaler
fi
- echo "Deploying cluster autoscaler ${casv} in ${vpc_name}"
- g3k_kv_filter "${GEN3_HOME}/kube/services/autoscaler/cluster-autoscaler-autodiscover.yaml" VPC_NAME "${vpc_name}" CAS_VERSION ${casv} | g3kubectl "--namespace=kube-system" apply -f -
+ echo "Removing cluster autoscaler ${casv} in ${vpc_name}"
+ g3k_kv_filter "${GEN3_HOME}/kube/services/autoscaler/cluster-autoscaler-autodiscover.yaml" VPC_NAME "${vpc_name}" CAS_VERSION ${casv} | g3kubectl "--namespace=kube-system" delete -f -
else
- echo "kube-setup-autoscaler exiting - cluster-autoscaler already deployed, use --force to redeploy"
+ echo "kube-setup-autoscaler exiting - cluster-autoscaler not deployed"
fi
}
function HELP(){
- echo "Usage: $SCRIPT [-v] [-f] "
+ echo "Usage: $SCRIPT [-v] [-f] [-r]"
echo "Options:"
echo "No option is mandatory, however you can provide the following:"
echo " -v num --version num --create=num Cluster autoscaler version number"
echo " -f --force Force and update if it is already installed"
+ echo " -r --remove remove deployment if already installed"
}
#echo $(get_autoscaler_version)
-OPTSPEC="hfv:-:"
+OPTSPEC="hfvr:-:"
while getopts "$OPTSPEC" optchar; do
case "${optchar}" in
-)
@@ -107,6 +131,10 @@ while getopts "$OPTSPEC" optchar; do
version=*)
CAS_VERSION=${OPTARG#*=}
;;
+ remove)
+ remove
+ exit 0
+ ;;
*)
if [ "$OPTERR" = 1 ] && [ "${OPTSPEC:0:1}" != ":" ]; then
echo "Unknown option --${OPTARG}" >&2
@@ -121,6 +149,10 @@ while getopts "$OPTSPEC" optchar; do
v)
CAS_VERSION=${OPTARG}
;;
+ r)
+ remove
+ exit 0
+ ;;
*)
if [ "$OPTERR" != 1 ] || [ "${OPTSPEC:0:1}" = ":" ]; then
echo "Non-option argument: '-${OPTARG}'" >&2
@@ -131,4 +163,4 @@ while getopts "$OPTSPEC" optchar; do
esac
done
-deploy
+deploy
\ No newline at end of file
diff --git a/gen3/bin/kube-setup-aws-es-proxy.sh b/gen3/bin/kube-setup-aws-es-proxy.sh
index d3aafcedc..986c5bf05 100644
--- a/gen3/bin/kube-setup-aws-es-proxy.sh
+++ b/gen3/bin/kube-setup-aws-es-proxy.sh
@@ -8,23 +8,46 @@
source "${GEN3_HOME}/gen3/lib/utils.sh"
gen3_load "gen3/lib/kube-setup-init"
+# Check the manifest for the es7 flag so we know which Elasticsearch domain to proxy
+manifestPath=$(g3k_manifest_path)
+es7="$(jq -r ".[\"global\"][\"es7\"]" < "$manifestPath" | tr '[:upper:]' '[:lower:]')"
+
[[ -z "$GEN3_ROLL_ALL" ]] && gen3 kube-setup-secrets
if g3kubectl get secrets/aws-es-proxy > /dev/null 2>&1; then
envname="$(gen3 api environment)"
- if ES_ENDPOINT="$(aws es describe-elasticsearch-domains --domain-names ${envname}-gen3-metadata --query "DomainStatusList[*].Endpoints" --output text)" \
- && [[ -n "${ES_ENDPOINT}" && -n "${envname}" ]]; then
- gen3 roll aws-es-proxy GEN3_ES_ENDPOINT "${ES_ENDPOINT}"
- g3kubectl apply -f "${GEN3_HOME}/kube/services/aws-es-proxy/aws-es-proxy-service.yaml"
- gen3_log_info "kube-setup-aws-es-proxy" "The aws-es-proxy service has been deployed onto the k8s cluster."
+
+ if [ "$es7" = true ]; then
+ if ES_ENDPOINT="$(aws es describe-elasticsearch-domains --domain-names ${envname}-gen3-metadata-2 --query "DomainStatusList[*].Endpoints" --output text)" \
+ && [[ -n "${ES_ENDPOINT}" && -n "${envname}" ]]; then
+ gen3 roll aws-es-proxy GEN3_ES_ENDPOINT "${ES_ENDPOINT}"
+ g3kubectl apply -f "${GEN3_HOME}/kube/services/aws-es-proxy/aws-es-proxy-priority-class.yaml"
+ g3kubectl apply -f "${GEN3_HOME}/kube/services/aws-es-proxy/aws-es-proxy-service.yaml"
+ gen3_log_info "kube-setup-aws-es-proxy" "The aws-es-proxy service has been deployed onto the k8s cluster."
+ else
+ #
+ # probably running in jenkins or job environment
+ # try to make sure network policy labels are up to date
+ #
+ gen3_log_info "kube-setup-aws-es-proxy" "Not deploying aws-es-proxy, no endpoint to hook it up."
+ gen3 kube-setup-networkpolicy service aws-es-proxy
+ g3kubectl patch deployment "aws-es-proxy-deployment" -p '{"spec":{"template":{"metadata":{"labels":{"netvpc":"yes"}}}}}' || true
+ fi
else
- #
- # probably running in jenkins or job environment
- # try to make sure network policy labels are up to date
- #
- gen3_log_info "kube-setup-aws-es-proxy" "Not deploying aws-es-proxy, no endpoint to hook it up."
- gen3 kube-setup-networkpolicy service aws-es-proxy
- g3kubectl patch deployment "aws-es-proxy-deployment" -p '{"spec":{"template":{"metadata":{"labels":{"netvpc":"yes"}}}}}' || true
+ if ES_ENDPOINT="$(aws es describe-elasticsearch-domains --domain-names ${envname}-gen3-metadata --query "DomainStatusList[*].Endpoints" --output text)" \
+ && [[ -n "${ES_ENDPOINT}" && -n "${envname}" ]]; then
+ gen3 roll aws-es-proxy GEN3_ES_ENDPOINT "${ES_ENDPOINT}"
+ g3kubectl apply -f "${GEN3_HOME}/kube/services/aws-es-proxy/aws-es-proxy-service.yaml"
+ gen3_log_info "kube-setup-aws-es-proxy" "The aws-es-proxy service has been deployed onto the k8s cluster."
+ else
+ #
+ # probably running in jenkins or job environment
+ # try to make sure network policy labels are up to date
+ #
+ gen3_log_info "kube-setup-aws-es-proxy" "Not deploying aws-es-proxy, no endpoint to hook it up."
+ gen3 kube-setup-networkpolicy service aws-es-proxy
+ g3kubectl patch deployment "aws-es-proxy-deployment" -p '{"spec":{"template":{"metadata":{"labels":{"netvpc":"yes"}}}}}' || true
+ fi
fi
gen3 job cron es-garbage '@daily'
else
diff --git a/gen3/bin/kube-setup-cedar-wrapper.sh b/gen3/bin/kube-setup-cedar-wrapper.sh
index 9a899a770..a56bebc40 100644
--- a/gen3/bin/kube-setup-cedar-wrapper.sh
+++ b/gen3/bin/kube-setup-cedar-wrapper.sh
@@ -1,6 +1,58 @@
source "${GEN3_HOME}/gen3/lib/utils.sh"
gen3_load "gen3/lib/kube-setup-init"
+create_client_and_secret() {
+ local hostname=$(gen3 api hostname)
+ local client_name="cedar_ingest_client"
+ gen3_log_info "kube-setup-cedar-wrapper" "creating fence ${client_name} for $hostname"
+ # delete any existing fence cedar clients
+ g3kubectl exec -c fence $(gen3 pod fence) -- fence-create client-delete --client ${client_name} > /dev/null 2>&1
+ local secrets=$(g3kubectl exec -c fence $(gen3 pod fence) -- fence-create client-create --client ${client_name} --grant-types client_credentials | tail -1)
+ # secrets looks like ('CLIENT_ID', 'CLIENT_SECRET')
+ if [[ ! $secrets =~ (\'(.*)\', \'(.*)\') ]]; then
+ gen3_log_err "kube-setup-cedar-wrapper" "Failed generating ${client_name}"
+ return 1
+ else
+ local client_id="${BASH_REMATCH[2]}"
+ local client_secret="${BASH_REMATCH[3]}"
+ gen3_log_info "Create cedar-client secrets file"
+ cat - < /dev/null 2>&1; then
+ local have_cedar_client_secret="1"
+ else
+ gen3_log_info "No g3auto cedar-client key present in secret"
+ fi
+
+ local client_name="cedar_ingest_client"
+ local client_list=$(g3kubectl exec -c fence $(gen3 pod fence) -- fence-create client-list)
+ local client_count=$(echo "$client_list=" | grep -cE "'name':.*'${client_name}'")
+ gen3_log_info "CEDAR client count = ${client_count}"
+
+ if [[ -z $have_cedar_client_secret ]] || [[ ${client_count} -lt 1 ]]; then
+ gen3_log_info "Creating new cedar-ingest client and secret"
+ local credsPath="$(gen3_secrets_folder)/g3auto/cedar/${cedar_creds_file}"
+ if ! create_client_and_secret > $credsPath; then
+ gen3_log_err "Failed to setup cedar-ingest secret"
+ return 1
+ else
+ gen3 secrets sync
+ gen3 job run usersync
+ fi
+ fi
+}
+
[[ -z "$GEN3_ROLL_ALL" ]] && gen3 kube-setup-secrets
if ! g3kubectl get secrets/cedar-g3auto > /dev/null 2>&1; then
@@ -8,6 +60,13 @@ if ! g3kubectl get secrets/cedar-g3auto > /dev/null 2>&1; then
return 1
fi
+if [[ -n "$JENKINS_HOME" ]]; then
+ gen3_log_info "Skipping cedar-client creds setup in non-adminvm environment"
+else
+ gen3_log_info "Checking cedar-client creds"
+ setup_creds
+fi
+
if ! gen3 secrets decode cedar-g3auto cedar_api_key.txt > /dev/null 2>&1; then
gen3_log_err "No CEDAR api key present in cedar-g3auto secret, not rolling CEDAR wrapper"
return 1
diff --git a/gen3/bin/kube-setup-cohort-middleware.sh b/gen3/bin/kube-setup-cohort-middleware.sh
new file mode 100644
index 000000000..a6a024578
--- /dev/null
+++ b/gen3/bin/kube-setup-cohort-middleware.sh
@@ -0,0 +1,66 @@
+#!/bin/bash
+# Deploy cohort-middleware into existing commons
+
+source "${GEN3_HOME}/gen3/lib/utils.sh"
+gen3_load "gen3/lib/kube-setup-init"
+
+setup_secrets() {
+ gen3_log_info "Deploying secrets for cohort-middleware"
+ # subshell
+ if [[ -n "$JENKINS_HOME" ]]; then
+ gen3_log_err "skipping secrets setup in non-adminvm environment"
+ return 0
+ fi
+
+ (
+ if ! dbcreds="$(gen3 db creds ohdsi)"; then
+ gen3_log_err "unable to find db creds for ohdsi service (was Atlas deployed?)"
+ return 1
+ fi
+
+ mkdir -p $(gen3_secrets_folder)/g3auto/cohort-middleware
+ credsFile="$(gen3_secrets_folder)/g3auto/cohort-middleware/development.yaml"
+
+ if [[ (! -f "$credsFile") ]]; then
+ DB_NAME=$(jq -r ".db_database" <<< "$dbcreds")
+ export DB_NAME
+ DB_USER=$(jq -r ".db_username" <<< "$dbcreds")
+ export DB_USER
+ DB_PASS=$(jq -r ".db_password" <<< "$dbcreds")
+ export DB_PASS
+ DB_HOST=$(jq -r ".db_host" <<< "$dbcreds")
+ export DB_HOST
+
+ cat - > "$credsFile" < /dev/null 2>&1); then
gen3_log_info "Creating namespace datadog"
g3kubectl create namespace datadog
@@ -44,7 +48,45 @@ if [[ "$ctxNamespace" == "default" || "$ctxNamespace" == "null" ]]; then
fi
helm repo add datadog https://helm.datadoghq.com --force-update 2> >(grep -v 'This is insecure' >&2)
helm repo update 2> >(grep -v 'This is insecure' >&2)
- helm upgrade --install datadog -f "$GEN3_HOME/kube/services/datadog/values.yaml" datadog/datadog -n datadog --version 3.1.9 2> >(grep -v 'This is insecure' >&2)
+ if [ "$argocd" = true ]; then
+ g3kubectl apply -f "$GEN3_HOME/kube/services/datadog/datadog-application.yaml" --namespace=argocd
+ else
+ helm upgrade --install datadog -f "$GEN3_HOME/kube/services/datadog/values.yaml" datadog/datadog -n datadog --version 3.6.4 2> >(grep -v 'This is insecure' >&2)
+ fi
+
+ # Check the manifest to see if we want to set up database monitoring
+ # Get the name of the cluster
+ # Run the command
+
+ if g3k_manifest_lookup .datadog.db_monitoring_enabled &> /dev/null; then
+ gen3_log_info "Detected that this commons is using database monitoring. Setting that up now."
+ clusters=$(aws rds describe-db-clusters --query "DBClusters[].DBClusterIdentifier" --output text)
+ clusterArray=($clusters)
+
+ for i in "${!clusterArray[@]}"; do
+ echo "$((i+1)). ${clusterArray[i]}"
+ done
+
+ selected="false"
+ selection=""
+
+ until [ $selected == "true" ]
+ do
+ read -p "Enter the number of the cluster you want to monitor (1-${#clusterArray[@]}): " num
+ if [[ "$num" =~ ^[0-9]+$ ]] && ((num >= 1 && num <= ${#clusterArray[@]})); then
+ echo "You entered: $num"
+ selected="true"
+ selection=${clusterArray[$num - 1]}
+ else
+ echo "Invalid input: $num"
+ fi
+ done
+
+ gen3 kube-setup-aurora-monitoring "$selection"
+ else
+ gen3_log_info "No database monitoring detected. We're done here."
+ fi
+
)
else
gen3_log_info "kube-setup-datadog exiting - datadog already deployed, use --force to redeploy"
diff --git a/gen3/bin/kube-setup-dicom.sh b/gen3/bin/kube-setup-dicom.sh
new file mode 100644
index 000000000..e49060ecb
--- /dev/null
+++ b/gen3/bin/kube-setup-dicom.sh
@@ -0,0 +1,119 @@
+#!/bin/bash
+
+source "${GEN3_HOME}/gen3/lib/utils.sh"
+gen3_load "gen3/gen3setup"
+
+hostname=$(gen3 api hostname)
+export hostname
+namespace=$(gen3 api namespace)
+export namespace
+
+# Deploy the dicom-server service
+setup_database_and_config() {
+ gen3_log_info "setting up dicom-server DB and config"
+
+ if g3kubectl describe secret orthanc-s3-g3auto > /dev/null 2>&1; then
+ gen3_log_info "orthanc-s3-g3auto secret already configured"
+ return 0
+ fi
+ if [[ -n "$JENKINS_HOME" || ! -f "$(gen3_secrets_folder)/creds.json" ]]; then
+ gen3_log_err "skipping db setup in non-adminvm environment"
+ return 0
+ fi
+
+ # Setup config files that dicom-server consumes
+ local secretsFolder
+ secretsFolder="$(gen3_secrets_folder)/g3auto/orthanc-s3"
+ if [[ ! -f "$secretsFolder/orthanc_config_overwrites.json" ]]; then
+ if [[ ! -f "$secretsFolder/dbcreds.json" ]]; then
+ if ! gen3 db setup orthanc-s3; then
+ gen3_log_err "Failed setting up orthanc database for dicom-server"
+ return 1
+ fi
+ fi
+
+ ref_hostname="${hostname//\./-}"
+ bucketname="${ref_hostname}-orthanc-storage"
+ awsuser="${ref_hostname}-orthanc"
+
+ if [[ ! -f "$secretsFolder/s3creds.json" ]]; then
+ gen3 s3 create "${bucketname}"
+ gen3 awsuser create "${awsuser}"
+ gen3 s3 attach-bucket-policy "${bucketname}" --read-write --user-name "${awsuser}"
+
+ user=$(gen3 secrets decode "${awsuser}"-g3auto awsusercreds.json)
+ key_id=$(jq -r .id <<< "$user")
+ access_key=$(jq -r .secret <<< "$user")
+
+ cat - > "$secretsFolder/s3creds.json" < "$secretsFolder/orthanc_config_overwrites.json" < /dev/null 2>&1; then
+ export DICOM_SERVER_URL="/dicom-server"
+ gen3_log_info "attaching ohif viewer to old dicom-server (orthanc w/ aurora)"
+ fi
+
+ if g3k_manifest_lookup .versions.orthanc > /dev/null 2>&1; then
+ export DICOM_SERVER_URL="/orthanc"
+ gen3_log_info "attaching ohif viewer to new dicom-server (orthanc w/ s3)"
+ fi
+
+ envsubst <"${GEN3_HOME}/kube/services/ohif-viewer/app-config.js" > "$secretsFolder/app-config.js"
+
+ gen3 secrets sync 'setup orthanc-s3-g3auto secrets'
+}
+
+if ! setup_database_and_config; then
+ gen3_log_err "kube-setup-dicom bailing out - database/config failed setup"
+ exit 1
+fi
+
+gen3 roll orthanc
+g3kubectl apply -f "${GEN3_HOME}/kube/services/orthanc/orthanc-service.yaml"
+
+cat < /dev/null; then
+ ecrRoleArn=$(g3kubectl get configmap manifest-global -o jsonpath={.data.ecr-access-job-role-arn})
+ fi
+ if [ -z "$ecrRoleArn" ]; then
+ gen3_log_err "Missing 'global.ecr-access-job-role-arn' configuration in manifest.json"
+ return 1
+ fi
+
+ local saName="ecr-access-job-sa"
+ if ! g3kubectl get sa "$saName" > /dev/null 2>&1; then
+ tempFile="ecr-access-job-policy.json"
+ cat - > $tempFile </dev/null 2>&1; then
- echo "fence-visa-update being added as a cronjob b/c fence >= 6.0.0 or 2022.07"
- gen3 job cron fence-visa-update "30 * * * *"
+ # Extract the value of ENABLE_VISA_UPDATE_CRON from the configmap manifest-fence (fence-config-public.yaml)
+ ENABLE_VISA_UPDATE_CRON=$(kubectl get cm manifest-fence -o=jsonpath='{.data.fence-config-public\.yaml}' | yq -r .ENABLE_VISA_UPDATE_CRON)
+
+ # Delete the fence-visa-update cronjob if ENABLE_VISA_UPDATE_CRON is set to false or not set or null in the configmap manifest-fence
+ if [[ "$ENABLE_VISA_UPDATE_CRON" == "false" ]] || [[ "$ENABLE_VISA_UPDATE_CRON" == "null" ]] || [[ -z "$ENABLE_VISA_UPDATE_CRON" ]]; then
+ echo "Deleting fence-visa-update cronjob"
+ kubectl delete cronjob fence-visa-update
+ elif [[ "$ENABLE_VISA_UPDATE_CRON" == "true" ]]; then
+ if ! g3kubectl get cronjob fence-visa-update >/dev/null 2>&1; then
+ echo "fence-visa-update being added as a cronjob b/c fence >= 6.0.0 or 2022.07"
+ gen3 job cron fence-visa-update "30 * * * *"
+ fi
+ else
+ echo "ENABLE_VISA_UPDATE_CRON has an unexpected value in the configmap manifest-fence. Skipping fence-visa-update cronjob setup."
fi
fi
diff --git a/gen3/bin/kube-setup-fluentd.sh b/gen3/bin/kube-setup-fluentd.sh
index 81fb0d2f6..02214be9e 100644
--- a/gen3/bin/kube-setup-fluentd.sh
+++ b/gen3/bin/kube-setup-fluentd.sh
@@ -25,11 +25,11 @@ if [[ "$ctxNamespace" == "default" || "$ctxNamespace" == "null" ]]; then
export KUBECTL_NAMESPACE=logging
# lets check the the version of fluentd, and use the right configuration
- # as of 2020-05-06 the latest version is v1.10.2
- if [ ${fluentdVersion} == "v1.10.2-debian-cloudwatch-1.0" ];
+ # if we are using newer versions of fluentd, assume we are using containerd which needs the newer config
+ if [ ${fluentdVersion} == "v1.15.3-debian-cloudwatch-1.0" ];
then
fluentdConfigmap="${XDG_RUNTIME_DIR}/gen3.conf"
- cat ${GEN3_HOME}/kube/services/fluentd/gen3-1.10.2.conf | tee ${fluentdConfigmap} > /dev/null
+ cat ${GEN3_HOME}/kube/services/fluentd/gen3-1.15.3.conf | tee ${fluentdConfigmap} > /dev/null
gen3 update_config fluentd-gen3 "${fluentdConfigmap}"
rm ${fluentdConfigmap}
else
@@ -45,10 +45,16 @@ if [[ "$ctxNamespace" == "default" || "$ctxNamespace" == "null" ]]; then
if g3kubectl --namespace=logging get daemonset fluentd > /dev/null 2>&1; then
g3kubectl "--namespace=logging" delete daemonset fluentd
fi
- (unset KUBECTL_NAMESPACE; gen3 gitops filter "${GEN3_HOME}/kube/services/fluentd/fluentd.yaml" GEN3_LOG_GROUP_NAME "${vpc_name}") | g3kubectl "--namespace=logging" apply -f -
+ export clusterversion=`kubectl version --short -o json | jq -r .serverVersion.minor`
+ if [ "${clusterversion}" = "24+" ]; then
+ (unset KUBECTL_NAMESPACE; gen3 gitops filter "${GEN3_HOME}/kube/services/fluentd/fluentd-eks-1.24.yaml" GEN3_LOG_GROUP_NAME "${vpc_name}") | g3kubectl "--namespace=logging" apply -f -
+ else
+ (unset KUBECTL_NAMESPACE; gen3 gitops filter "${GEN3_HOME}/kube/services/fluentd/fluentd.yaml" GEN3_LOG_GROUP_NAME "${vpc_name}") | g3kubectl "--namespace=logging" apply -f -
+ (unset KUBECTL_NAMESPACE; gen3 gitops filter "${GEN3_HOME}/kube/services/fluentd/fluentd-karpenter.yaml" GEN3_LOG_GROUP_NAME "${vpc_name}") | g3kubectl "--namespace=logging" apply -f -
+ fi
# We need this serviceaccount to be in the default namespace for the job and cronjob to properly work
g3kubectl apply -f "${GEN3_HOME}/kube/services/fluentd/fluent-jobs-serviceaccount.yaml" -n default
- if [ ${fluentdVersion} == "v1.10.2-debian-cloudwatch-1.0" ];
+ if [ ${fluentdVersion} == "v1.15.3-debian-cloudwatch-1.0" ];
then
(
unset KUBECTL_NAMESPACE
diff --git a/gen3/bin/kube-setup-gen3-discovery-ai.sh b/gen3/bin/kube-setup-gen3-discovery-ai.sh
new file mode 100644
index 000000000..44a472a74
--- /dev/null
+++ b/gen3/bin/kube-setup-gen3-discovery-ai.sh
@@ -0,0 +1,154 @@
+#!/bin/bash
+#
+# Deploy the gen3-discovery-ai service
+#
+
+source "${GEN3_HOME}/gen3/lib/utils.sh"
+gen3_load "gen3/gen3setup"
+
+# NOTE: no db for this service yet, but we'll likely need it in the future
+setup_database() {
+ gen3_log_info "setting up gen3-discovery-ai service ..."
+
+ if g3kubectl describe secret gen3-discovery-ai-g3auto > /dev/null 2>&1; then
+ gen3_log_info "gen3-discovery-ai-g3auto secret already configured"
+ return 0
+ fi
+ if [[ -n "$JENKINS_HOME" || ! -f "$(gen3_secrets_folder)/creds.json" ]]; then
+ gen3_log_err "skipping db setup in non-adminvm environment"
+ return 0
+ fi
+ # Setup .env file that gen3-discovery-ai service consumes
+  local secretsFolder="$(gen3_secrets_folder)/g3auto/gen3-discovery-ai"
+  if [[ ! -f "$secretsFolder/gen3-discovery-ai.env" || ! -f "$secretsFolder/base64Authz.txt" ]]; then
+
+ if [[ ! -f "$secretsFolder/dbcreds.json" ]]; then
+ if ! gen3 db setup gen3-discovery-ai; then
+ gen3_log_err "Failed setting up database for gen3-discovery-ai service"
+ return 1
+ fi
+ fi
+ if [[ ! -f "$secretsFolder/dbcreds.json" ]]; then
+ gen3_log_err "dbcreds not present in Gen3Secrets/"
+ return 1
+ fi
+
+ # go ahead and rotate the password whenever we regen this file
+ local password="$(gen3 random)"
+ cat - > "$secretsFolder/gen3-discovery-ai.env" < "$secretsFolder/base64Authz.txt"
+ fi
+ gen3 secrets sync 'setup gen3-discovery-ai-g3auto secrets'
+}
+
+if ! g3k_manifest_lookup '.versions."gen3-discovery-ai"' 2> /dev/null; then
+ gen3_log_info "kube-setup-gen3-discovery-ai exiting - gen3-discovery-ai service not in manifest"
+ exit 0
+fi
+
+# There's no db for this service *yet*
+#
+# if ! setup_database; then
+# gen3_log_err "kube-setup-gen3-discovery-ai bailing out - database failed setup"
+# exit 1
+# fi
+
+setup_storage() {
+ local saName="gen3-discovery-ai-sa"
+ g3kubectl create sa "$saName" > /dev/null 2>&1 || true
+
+ local secret
+ local secretsFolder="$(gen3_secrets_folder)/g3auto/gen3-discovery-ai"
+
+ secret="$(g3kubectl get secret gen3-discovery-ai-g3auto -o json 2> /dev/null)"
+ local hasStorageCfg
+ hasStorageCfg=$(jq -r '.data | has("storage_config.json")' <<< "$secret")
+
+ if [ "$hasStorageCfg" = "false" ]; then
+ gen3_log_info "setting up storage for gen3-discovery-ai service"
+ #
+ # gen3-discovery-ai-g3auto secret still does not exist
+ # we need to setup an S3 bucket and IAM creds
+ # let's avoid creating multiple buckets for different
+ # deployments to the same k8s cluster (dev, etc)
+ #
+ local bucketName
+ local accountNumber
+ local environment
+
+ if ! accountNumber="$(aws sts get-caller-identity --output text --query 'Account')"; then
+ gen3_log_err "could not determine account numer"
+ return 1
+ fi
+
+ gen3_log_info "accountNumber: ${accountNumber}"
+
+ if ! environment="$(g3kubectl get configmap manifest-global -o json | jq -r .data.environment)"; then
+ gen3_log_err "could not determine environment from manifest-global - bailing out of gen3-discovery-ai setup"
+ return 1
+ fi
+
+ gen3_log_info "environment: ${environment}"
+
+ # try to come up with a unique but composable bucket name
+ bucketName="gen3-discovery-ai-${accountNumber}-${environment//_/-}"
+
+ gen3_log_info "bucketName: ${bucketName}"
+
+ if aws s3 ls --page-size 1 "s3://${bucketName}" > /dev/null 2>&1; then
+ gen3_log_info "${bucketName} s3 bucket already exists - probably in use by another namespace - copy the creds from there to $(gen3_secrets_folder)/g3auto/gen3-discovery-ai"
+ # continue on ...
+ elif ! gen3 s3 create "${bucketName}"; then
+ gen3_log_err "maybe failed to create bucket ${bucketName}, but maybe not, because the terraform script is flaky"
+ fi
+
+ local hostname
+ hostname="$(gen3 api hostname)"
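+    # the resulting storage_config.json looks like (illustrative values):
+    #   { "bucket": "gen3-discovery-ai-<account>-<environment>", "prefix": "<hostname>" }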
+ jq -r -n --arg bucket "${bucketName}" --arg hostname "${hostname}" '.bucket=$bucket | .prefix=$hostname' > "${secretsFolder}/storage_config.json"
+ gen3 secrets sync 'setup gen3-discovery-ai credentials'
+
+ local roleName
+ roleName="$(gen3 api safe-name gen3-discovery-ai)" || return 1
+
+ if ! gen3 awsrole info "$roleName" > /dev/null; then # setup role
+ bucketName="$( (gen3 secrets decode 'gen3-discovery-ai-g3auto' 'storage_config.json' || echo ERROR) | jq -r .bucket)" || return 1
+ gen3 awsrole create "$roleName" "$saName" || return 1
+ gen3 s3 attach-bucket-policy "$bucketName" --read-write --role-name "${roleName}"
+ # try to give the gitops role read/write permissions on the bucket
+ local gitopsRoleName
+ gitopsRoleName="$(gen3 api safe-name gitops)"
+ gen3 s3 attach-bucket-policy "$bucketName" --read-write --role-name "${gitopsRoleName}"
+ fi
+ fi
+
+ return 0
+}
+
+if ! setup_storage; then
+ gen3_log_err "kube-setup-gen3-discovery-ai bailing out - storage failed setup"
+ exit 1
+fi
+
+gen3_log_info "Setup complete, syncing configuration to bucket"
+
+bucketName="$( (gen3 secrets decode 'gen3-discovery-ai-g3auto' 'storage_config.json' || echo ERROR) | jq -r .bucket)" || exit 1
+aws s3 sync "$(dirname $(g3k_manifest_path))/gen3-discovery-ai/knowledge" "s3://$bucketName" --delete
+
+gen3 roll gen3-discovery-ai
+g3kubectl apply -f "${GEN3_HOME}/kube/services/gen3-discovery-ai/gen3-discovery-ai-service.yaml"
+
+if [[ -z "$GEN3_ROLL_ALL" ]]; then
+ gen3 kube-setup-networkpolicy
+ gen3 kube-setup-revproxy
+fi
+
+gen3_log_info "The gen3-discovery-ai service has been deployed onto the kubernetes cluster"
+gen3_log_info "test with: curl https://commons-host/ai"
diff --git a/gen3/bin/kube-setup-hatchery.sh b/gen3/bin/kube-setup-hatchery.sh
index 1192c293e..dadbbd930 100644
--- a/gen3/bin/kube-setup-hatchery.sh
+++ b/gen3/bin/kube-setup-hatchery.sh
@@ -5,6 +5,44 @@
source "${GEN3_HOME}/gen3/lib/utils.sh"
gen3_load "gen3/gen3setup"
+function CostUsagePolicy() {
+ roleName="$(gen3 api safe-name hatchery-sa)"
+ # Cost Usage Report policy
+ curPolicy="costUsageReportPolicy"
+
+  # Use the AWS CLI to list the inline policies attached to the role, then grep for the Cost Usage policy name
+  attachedPolicy=$(aws iam list-role-policies --role-name "$roleName" | grep "$curPolicy")
+
+  # Check whether the policy name was found among the role's inline policies
+  if [ -n "$attachedPolicy" ]; then
+ echo "Policy $curPolicy is attached to the role $roleName."
+ else
+ echo "Policy $curPolicy is NOT attached to the role $roleName."
+ echo "Attaching policy"
+ # Define the policy document
+ policyDocument='{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Sid": "VisualEditor0",
+ "Effect": "Allow",
+ "Action": "ce:GetCostAndUsage",
+ "Resource": "*"
+ }
+ ]
+ }'
+
+ # Create an inline policy for the role
+ aws iam put-role-policy --role-name "$roleName" --policy-name "$curPolicy" --policy-document "$policyDocument"
+ if [ $? -eq 0 ]; then
+ echo "Inline policy $curPolicy has been successfully created and attached to the role $roleName."
+ else
+ echo "There was an error creating the inline policy $curPolicy."
+ fi
+
+ fi
+}
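+# To spot-check the inline policy by hand afterwards (the role name is derived the same way as above):
+#   aws iam get-role-policy --role-name "$(gen3 api safe-name hatchery-sa)" --policy-name costUsageReportPolicy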
+
# Jenkins friendly
export WORKSPACE="${WORKSPACE:-$HOME}"
@@ -20,11 +58,81 @@ gen3 jupyter j-namespace setup
#
(g3k_kv_filter ${GEN3_HOME}/kube/services/hatchery/serviceaccount.yaml BINDING_ONE "name: hatchery-binding1-$namespace" BINDING_TWO "name: hatchery-binding2-$namespace" CURRENT_NAMESPACE "namespace: $namespace" | g3kubectl apply -f -) || true
+function exists_or_create_gen3_license_table() {
+ # Create dynamodb table for gen3-license if it does not exist.
+ TARGET_TABLE="$1"
+ echo "Checking for dynamoDB table: ${TARGET_TABLE}"
-# cron job to distribute licenses if using Stata workspaces
-if [ "$(g3kubectl get configmaps/manifest-hatchery -o yaml | grep "\"image\": .*stata.*")" ];
-then
- gen3 job cron distribute-licenses '* * * * *'
+ FOUND_TABLE=`aws dynamodb list-tables | jq -r .TableNames | jq -c -r '.[]' | grep $TARGET_TABLE`
+ if [ -n "$FOUND_TABLE" ]; then
+ echo "Target table already exists in dynamoDB: $FOUND_TABLE"
+ else
+ echo "Creating table ${TARGET_TABLE}"
+ GSI=`g3kubectl get configmaps/manifest-hatchery -o json | jq -r '.data."license-user-maps-global-secondary-index"'`
+ if [[ -z "$GSI" || "$GSI" == "null" ]]; then
+ echo "Error: No global-secondary-index in configuration"
+ return 0
+ fi
+ aws dynamodb create-table \
+ --no-cli-pager \
+ --table-name "$TARGET_TABLE" \
+ --attribute-definitions AttributeName=itemId,AttributeType=S \
+ AttributeName=environment,AttributeType=S \
+ AttributeName=isActive,AttributeType=S \
+ --key-schema AttributeName=itemId,KeyType=HASH \
+ AttributeName=environment,KeyType=RANGE \
+ --provisioned-throughput ReadCapacityUnits=5,WriteCapacityUnits=5 \
+ --global-secondary-indexes \
+ "[
+ {
+ \"IndexName\": \"$GSI\",
+ \"KeySchema\": [{\"AttributeName\":\"environment\",\"KeyType\":\"HASH\"},
+ {\"AttributeName\":\"isActive\",\"KeyType\":\"RANGE\"}],
+ \"Projection\":{
+ \"ProjectionType\":\"INCLUDE\",
+ \"NonKeyAttributes\":[\"itemId\",\"userId\",\"licenseId\",\"licenseType\"]
+ },
+ \"ProvisionedThroughput\": {
+ \"ReadCapacityUnits\": 5,
+ \"WriteCapacityUnits\": 3
+ }
+ }
+ ]"
+ fi
+}
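+# Illustrative query against the GSI created above (table and index names come from
+# manifest-hatchery; the attribute values below are placeholders):
+#   aws dynamodb query --table-name "$TARGET_TABLE" --index-name "$GSI" \
+#     --key-condition-expression "environment = :env AND isActive = :active" \
+#     --expression-attribute-values '{":env":{"S":"my-commons"},":active":{"S":"True"}}'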
+
+TARGET_TABLE=`g3kubectl get configmaps/manifest-hatchery -o json | jq -r '.data."license-user-maps-dynamodb-table"'`
+if [[ -z "$TARGET_TABLE" || "$TARGET_TABLE" == "null" ]]; then
+ echo "No gen3-license table in configuration"
+ # cron job to distribute licenses if using Stata workspaces but not using dynamoDB
+ if [ "$(g3kubectl get configmaps/manifest-hatchery -o yaml | grep "\"image\": .*stata.*")" ];
+ then
+ gen3 job cron distribute-licenses '* * * * *'
+ fi
+else
+ echo "Found gen3-license table in configuration: $TARGET_TABLE"
+ exists_or_create_gen3_license_table "$TARGET_TABLE"
+fi
+
+# if `nextflow-global.imagebuilder-reader-role-arn` is set in hatchery config, allow hatchery
+# to assume the configured role
+imagebuilderRoleArn=$(g3kubectl get configmap manifest-hatchery -o jsonpath={.data.nextflow-global} | jq -r '."imagebuilder-reader-role-arn"')
+assumeImageBuilderRolePolicyBlock=""
+if [ -z "$imagebuilderRoleArn" ]; then
+ gen3_log_info "No 'nexftlow-global.imagebuilder-reader-role-arn' in Hatchery configuration, not granting AssumeRole"
+else
+ gen3_log_info "Found 'nexftlow-global.imagebuilder-reader-role-arn' in Hatchery configuration, granting AssumeRole"
+ assumeImageBuilderRolePolicyBlock=$( cat < /dev/null 2>&1; then
roleName="$(gen3 api safe-name hatchery-sa)"
gen3 awsrole create $roleName $saName
policyName="$(gen3 api safe-name hatchery-policy)"
- policyInfo=$(gen3_aws_run aws iam create-policy --policy-name "$policyName" --policy-document "$policy" --description "Allow hathcery to assume csoc_adminvm role in other accounts, for multi-account workspaces")
+ policyInfo=$(gen3_aws_run aws iam create-policy --policy-name "$policyName" --policy-document "$policy" --description "Allow hatchery to assume csoc_adminvm role in other accounts and manage dynamodb for multi-account workspaces, and to create resources for nextflow workspaces")
if [ -n "$policyInfo" ]; then
- policyArn="$(jq -e -r '.["Policy"].Arn' <<< "$policyInfo")" || { echo "Cannot get 'Policy.Arn' from output: $policyInfo"; return 1; }
+ policyArn="$(jq -e -r '.["Policy"].Arn' <<< "$policyInfo")" || { echo "Cannot get 'Policy.Arn' from output: $policyInfo"; return 1; }
else
- echo "Unable to create policy $policyName. Assuming it already exists and continuing"
+ echo "Unable to create policy '$policyName'. Assume it already exists and create a new version to update the permissions..."
policyArn=$(gen3_aws_run aws iam list-policies --query "Policies[?PolicyName=='$policyName'].Arn" --output text)
- fi
+ # there can only be up to 5 versions, so delete old versions (except the current default one)
+ versions="$(gen3_aws_run aws iam list-policy-versions --policy-arn $policyArn | jq -r '.Versions[] | select(.IsDefaultVersion != true) | .VersionId')"
+ versions=(${versions}) # string to array
+ for v in "${versions[@]}"; do
+ echo "Deleting old version '$v'"
+ gen3_aws_run aws iam delete-policy-version --policy-arn $policyArn --version-id $v
+ done
+
+ # create the new version
+ gen3_aws_run aws iam create-policy-version --policy-arn "$policyArn" --policy-document "$policy" --set-as-default
+ fi
gen3_log_info "Attaching policy '${policyName}' to role '${roleName}'"
gen3 awsrole attach-policy ${policyArn} --role-name ${roleName} --force-aws-cli || exit 1
gen3 awsrole attach-policy "arn:aws:iam::aws:policy/AWSResourceAccessManagerFullAccess" --role-name ${roleName} --force-aws-cli || exit 1
fi
+# function to setup IAM policies for CostUsageReport
+CostUsagePolicy
+
if [[ -f "$(gen3_secrets_folder)/prisma/apikey.json" ]]; then
ACCESSKEYID=$(jq -r .AccessKeyID "$(gen3_secrets_folder)/prisma/apikey.json")
SECRETKEY=$(jq -r .SecretKey "$(gen3_secrets_folder)/prisma/apikey.json")
@@ -94,4 +262,4 @@ fi
g3kubectl apply -f "${GEN3_HOME}/kube/services/hatchery/hatchery-service.yaml"
gen3 roll hatchery
-gen3 job cron hatchery-reaper '@daily'
\ No newline at end of file
+gen3 job cron hatchery-reaper "*/5 * * * *"
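+# "*/5 * * * *" runs the hatchery-reaper every 5 minutes (previously @daily)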
diff --git a/gen3/bin/kube-setup-ingress.sh b/gen3/bin/kube-setup-ingress.sh
index bf718c29e..b75470f73 100644
--- a/gen3/bin/kube-setup-ingress.sh
+++ b/gen3/bin/kube-setup-ingress.sh
@@ -1,28 +1,68 @@
#!/bin/bash
-#
-
-
source "${GEN3_HOME}/gen3/lib/utils.sh"
gen3_load "gen3/gen3setup"
gen3_load "gen3/lib/kube-setup-init"
+gen3_load "gen3/lib/g3k_manifest"
+# Deploy WAF if flag set in manifest
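+# e.g. (illustrative) manifest.json snippet: "global": { "waf_enabled": true, ... }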
+manifestPath=$(g3k_manifest_path)
+deployWaf="$(jq -r ".[\"global\"][\"waf_enabled\"]" < "$manifestPath" | tr '[:upper:]' '[:lower:]')"
ctx="$(g3kubectl config current-context)"
ctxNamespace="$(g3kubectl config view -ojson | jq -r ".contexts | map(select(.name==\"$ctx\")) | .[0] | .context.namespace")"
scriptDir="${GEN3_HOME}/kube/services/ingress"
+gen3_ingress_setup_waf() {
+ gen3_log_info "Starting GPE-312 waf setup"
+ #variable to see if WAF already exists
+ export waf=`aws wafv2 list-web-acls --scope REGIONAL | jq -r '.WebACLs[]|select(.Name| contains(env.vpc_name)).Name'`
+  if [[ -z $waf ]]; then
+    gen3_log_info "Creating Web ACL. This may take a few minutes."
+    aws wafv2 create-web-acl \
+      --name $vpc_name-waf \
+      --scope REGIONAL \
+      --default-action Allow={} \
+      --visibility-config SampledRequestsEnabled=true,CloudWatchMetricsEnabled=true,MetricName=GPE-312WebAclMetrics \
+      --rules file://${GEN3_HOME}/gen3/bin/waf-rules-GPE-312.json \
+      --region us-east-1
+    # need to sleep to avoid "WAFUnavailableEntityException" error since the WAF takes a bit to spin up
+    sleep 300
+  else
+    gen3_log_info "WAF already exists. Skipping..."
+  fi
+ gen3_log_info "Attaching ACL to ALB."
+ export acl_arn=`aws wafv2 list-web-acls --scope REGIONAL | jq -r '.WebACLs[]|select(.Name| contains(env.vpc_name)).ARN'`
+  export alb_name=`kubectl get ingress gen3-ingress | awk '{print $4}' | tail -n +2 | sed 's/^\([A-Za-z0-9]*-[A-Za-z0-9]*-[A-Za-z0-9]*\).*/\1/;q'`
+ export alb_arn=`aws elbv2 describe-load-balancers --name $alb_name | yq -r .LoadBalancers[0].LoadBalancerArn`
+ export association=`aws wafv2 list-resources-for-web-acl --web-acl-arn $acl_arn | grep $alb_arn| sed -e 's/^[ \t]*//' | sed -e 's/^"//' -e 's/"$//'`
+ #variable to see if the association already exists
+ echo "acl_arn: $acl_arn"
+ echo "alb_arn: $alb_arn"
+  if [[ $association != $alb_arn ]]; then
+    aws wafv2 associate-web-acl \
+      --web-acl-arn $acl_arn \
+      --resource-arn $alb_arn \
+      --region us-east-1
+
+    gen3_log_info "Add ACL arn annotation to ALB ingress"
+    kubectl annotate ingress gen3-ingress "alb.ingress.kubernetes.io/wafv2-acl-arn=$acl_arn"
+  else
+    gen3_log_info "ALB is already associated with ACL. Skipping..."
+  fi
+}
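+# To inspect the association by hand after this function runs (the ACL ARN is environment-specific):
+#   aws wafv2 list-resources-for-web-acl --web-acl-arn "$acl_arn"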
+
+
+gen3_ingress_setup_role() {
# https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.4/deploy/installation/
# https://raw.githubusercontent.com/kubernetes-sigs/aws-load-balancer-controller/v2.4.1/docs/install/iam_policy.json
# only do this if we are running in the default namespace
-if [[ "$ctxNamespace" == "default" || "$ctxNamespace" == "null" ]]; then
- saName="aws-load-balancer-controller"
- roleName=$(gen3 api safe-name ingress)
- policyName=$(gen3 api safe-name ingress-policy)
- ingressPolicy="$(mktemp "$XDG_RUNTIME_DIR/ingressPolicy.json_XXXXXX")"
- arPolicyFile="$(mktemp "$XDG_RUNTIME_DIR/arPolicy.json_XXXXXX")"
-
+ local saName="aws-load-balancer-controller"
+ local roleName=$(gen3 api safe-name ingress)
+ local policyName=$(gen3 api safe-name ingress-policy)
+ local ingressPolicy="$(mktemp "$XDG_RUNTIME_DIR/ingressPolicy.json_XXXXXX")"
+ local arPolicyFile="$(mktemp "$XDG_RUNTIME_DIR/arPolicy.json_XXXXXX")"
# Create an inline policy for the ingress-controller
cat - > "$ingressPolicy" < /dev/null; then # setup role
+ if ! gen3 awsrole info "$roleName" "kube-system" > /dev/null; then # setup role
gen3_log_info "creating IAM role for ingress: $roleName, linking to sa $saName"
gen3 awsrole create "$roleName" "$saName" "kube-system" || return 1
aws iam put-role-policy --role-name "$roleName" --policy-document file://${ingressPolicy} --policy-name "$policyName" 1>&2
@@ -255,18 +317,31 @@ EOM
# update the annotation - just to be thorough
gen3 awsrole sa-annotate "$saName" "$roleName" kube-system
fi
-
- kubectl apply -k "github.com/aws/eks-charts/stable/aws-load-balancer-controller//crds?ref=master"
-
+}
+
+gen3_ingress_deploy_helm_chart() {
+ kubectl apply -k "github.com/aws/eks-charts/stable/aws-load-balancer-controller/crds?ref=master"
if (! helm status aws-load-balancer-controller -n kube-system > /dev/null 2>&1 ) || [[ "$1" == "--force" ]]; then
helm repo add eks https://aws.github.io/eks-charts 2> >(grep -v 'This is insecure' >&2)
helm repo update 2> >(grep -v 'This is insecure' >&2)
-
+
# # TODO: Move to values.yaml file
helm upgrade --install aws-load-balancer-controller eks/aws-load-balancer-controller -n kube-system --set clusterName=$(gen3 api environment) --set serviceAccount.create=false --set serviceAccount.name=aws-load-balancer-controller 2> >(grep -v 'This is insecure' >&2)
else
gen3_log_info "kube-setup-ingress exiting - ingress already deployed, use --force to redeploy"
fi
+}
+
+if [[ "$ctxNamespace" == "default" || "$ctxNamespace" == "null" ]]; then
+ # Create role/SA for the alb's
+ gen3_ingress_setup_role
+ # Deploy the aws-load-balancer-controller helm chart and upgrade if --force flag applied
+ gen3_ingress_deploy_helm_chart $1
+else
+ if [[ -z $(kubectl get sa -n kube-system | grep aws-load-balancer-controller) ]]; then
+ gen3_log_err "Please run this in the default namespace first to setup the necessary roles"
+ exit 1
+ fi
fi
@@ -274,5 +349,6 @@ gen3_log_info "Applying ingress resource"
export ARN=$(g3kubectl get configmap global --output=jsonpath='{.data.revproxy_arn}')
g3kubectl apply -f "${GEN3_HOME}/kube/services/revproxy/revproxy-service.yaml"
envsubst <$scriptDir/ingress.yaml | g3kubectl apply -f -
-
-
+if [ "$deployWaf" = true ]; then
+ gen3_ingress_setup_waf
+fi
diff --git a/gen3/bin/kube-setup-jenkins2.sh b/gen3/bin/kube-setup-jenkins2.sh
new file mode 100644
index 000000000..f5233f978
--- /dev/null
+++ b/gen3/bin/kube-setup-jenkins2.sh
@@ -0,0 +1,71 @@
+#!/bin/bash
+#
+# Just a little helper for deploying jenkins onto k8s the first time
+#
+
+set -e
+
+export WORKSPACE="${WORKSPACE:-$HOME}"
+source "${GEN3_HOME}/gen3/lib/utils.sh"
+gen3_load "gen3/gen3setup"
+
+gen3 kube-setup-secrets
+
+#
+# Assume Jenkins should use 'jenkins' profile credentials in "${WORKSPACE}"/.aws/credentials
+#
+aws_access_key_id="$(aws configure get jenkins.aws_access_key_id)"
+aws_secret_access_key="$(aws configure get jenkins.aws_secret_access_key)"
+google_acct1_email="$(jq -r '.jenkins.google_acct1.email' < $(gen3_secrets_folder)/creds.json)"
+google_acct1_password="$(jq -r '.jenkins.google_acct1.password' < $(gen3_secrets_folder)/creds.json)"
+google_acct2_email="$(jq -r '.jenkins.google_acct2.email' < $(gen3_secrets_folder)/creds.json)"
+google_acct2_password="$(jq -r '.jenkins.google_acct2.password' < $(gen3_secrets_folder)/creds.json)"
+
+if [ -z "$aws_access_key_id" -o -z "$aws_secret_access_key" ]; then
+ gen3_log_err 'not configuring jenkins - could not extract secrets from aws configure'
+ exit 1
+fi
+if [[ -z "$google_acct1_email" || -z "$google_acct1_password" || -z "$google_acct2_email" || -z "$google_acct2_password" ]]; then
+ gen3_log_err "missing google credentials in '.jenkins' of creds.json"
+ exit 1
+fi
+
+if ! g3kubectl get secrets jenkins-secret > /dev/null 2>&1; then
+ # make it easy to rerun kube-setup-jenkins.sh
+ g3kubectl create secret generic jenkins-secret "--from-literal=aws_access_key_id=$aws_access_key_id" "--from-literal=aws_secret_access_key=$aws_secret_access_key"
+fi
+if ! g3kubectl get secrets google-acct1 > /dev/null 2>&1; then
+ g3kubectl create secret generic google-acct1 "--from-literal=email=${google_acct1_email}" "--from-literal=password=${google_acct1_password}"
+fi
+if ! g3kubectl get secrets google-acct2 > /dev/null 2>&1; then
+ g3kubectl create secret generic google-acct2 "--from-literal=email=${google_acct2_email}" "--from-literal=password=${google_acct2_password}"
+fi
+
+if ! g3kubectl get storageclass gp2 > /dev/null 2>&1; then
+ g3kubectl apply -f "${GEN3_HOME}/kube/services/jenkins/10storageclass.yaml"
+fi
+if ! g3kubectl get persistentvolumeclaim datadir-jenkins > /dev/null 2>&1; then
+ g3kubectl apply -f "${GEN3_HOME}/kube/services/jenkins/00pvc.yaml"
+fi
+
+# Note: jenkins service account is configured by `kube-setup-roles`
+gen3 kube-setup-roles
+# Note: only the 'default' namespace jenkins-service account gets a cluster rolebinding
+g3kubectl apply -f "${GEN3_HOME}/kube/services/jenkins/clusterrolebinding-devops.yaml"
+
+# Note: requires Jenkins entry in cdis-manifest
+gen3 roll jenkins2
+gen3 roll jenkins2-worker
+gen3 roll jenkins2-ci-worker
+
+#
+# Get the ARN of the SSL certificate for the commons -
+# We'll optimistically assume it's a wildcard cert that
+# is appropriate to also attach to the jenkins ELB
+#
+export ARN=$(g3kubectl get configmap global --output=jsonpath='{.data.revproxy_arn}')
+if [[ ! -z $ARN ]]; then
+ envsubst <"${GEN3_HOME}/kube/services/jenkins/jenkins-service.yaml" | g3kubectl apply -f -
+else
+ gen3_log_info "Global configmap not configured - not launching service (require SSL cert ARN)"
+fi
diff --git a/gen3/bin/kube-setup-karpenter.sh b/gen3/bin/kube-setup-karpenter.sh
new file mode 100644
index 000000000..0a743f7ed
--- /dev/null
+++ b/gen3/bin/kube-setup-karpenter.sh
@@ -0,0 +1,270 @@
+#!/bin/bash
+
+#set -e
+
+source "${GEN3_HOME}/gen3/lib/utils.sh"
+gen3_load "gen3/gen3setup"
+
+ctx="$(g3kubectl config current-context)"
+ctxNamespace="$(g3kubectl config view -ojson | jq -r ".contexts | map(select(.name==\"$ctx\")) | .[0] | .context.namespace")"
+
+gen3_deploy_karpenter() {
+ # Only do cluster level changes in the default namespace to prevent conflicts
+ if [[ ("$ctxNamespace" == "default" || "$ctxNamespace" == "null") ]]; then
+ gen3_log_info "Deploying karpenter"
+ # If the karpenter namespace doesn't exist or the force flag isn't in place then deploy
+ if [[ ( -z $(g3kubectl get namespaces | grep karpenter) || $FORCE == "true" ) ]]; then
+ gen3_log_info "Ensuring that the spot instance service linked role is setup"
+ # Ensure the spot instance service linked role is setup
+ # It is required for running spot instances
+ #### Uncomment this when we fix the sqs helper to allow for usage by more than one service
+ #gen3_create_karpenter_sqs_eventbridge
+ aws iam create-service-linked-role --aws-service-name spot.amazonaws.com || true
+ if g3k_config_lookup .global.karpenter_version; then
+ karpenter=$(g3k_config_lookup .global.karpenter_version)
+ fi
+ export clusterversion=`kubectl version -o json | jq -r .serverVersion.minor`
+ if [ "${clusterversion}" = "28+" ]; then
+ karpenter=${karpenter:-v0.32.9}
+ elif [ "${clusterversion}" = "25+" ]; then
+ karpenter=${karpenter:-v0.27.0}
+ elif [ "${clusterversion}" = "24+" ]; then
+ karpenter=${karpenter:-v0.24.0}
+ else
+ karpenter=${karpenter:-v0.32.9}
+ fi
+ local queue_name="$(gen3 api safe-name karpenter-sqs)"
+ echo '{
+ "Statement": [
+ {
+ "Action": [
+ "ssm:GetParameter",
+ "iam:PassRole",
+ "iam:*InstanceProfile",
+ "ec2:DescribeImages",
+ "ec2:RunInstances",
+ "ec2:DescribeSubnets",
+ "ec2:DescribeSecurityGroups",
+ "ec2:DescribeLaunchTemplates",
+ "ec2:DescribeInstances",
+ "ec2:DescribeInstanceTypes",
+ "ec2:DescribeInstanceTypeOfferings",
+ "ec2:DescribeAvailabilityZones",
+ "ec2:DeleteLaunchTemplate",
+ "ec2:CreateTags",
+ "ec2:CreateLaunchTemplate",
+ "ec2:CreateFleet",
+ "ec2:DescribeSpotPriceHistory",
+ "pricing:GetProducts"
+ ],
+ "Effect": "Allow",
+ "Resource": "*",
+ "Sid": "Karpenter"
+ },
+ {
+ "Action": [
+ "sqs:DeleteMessage",
+ "sqs:GetQueueAttributes",
+ "sqs:GetQueueUrl",
+ "sqs:ReceiveMessage"
+ ],
+ "Effect": "Allow",
+ "Resource": "arn:aws:sqs:*:'$(aws sts get-caller-identity --output text --query "Account")':karpenter-sqs-'$(echo vpc_name)'",
+ "Sid": "Karpenter2"
+ },
+ {
+ "Action": "ec2:TerminateInstances",
+ "Condition": {
+ "StringLike": {
+ "ec2:ResourceTag/Name": "*karpenter*"
+ }
+ },
+ "Effect": "Allow",
+ "Resource": "*",
+ "Sid": "ConditionalEC2Termination"
+ },
+ {
+ "Sid": "VisualEditor0",
+ "Effect": "Allow",
+ "Action": [
+ "kms:*"
+ ],
+ "Resource": "*"
+ }
+ ],
+ "Version": "2012-10-17"
+ }' > $XDG_RUNTIME_DIR/controller-policy.json
+
+ gen3_log_info "Creating karpenter namespace"
+ g3kubectl create namespace karpenter 2> /dev/null || true
+
+ gen3_log_info "Creating karpenter AWS role and k8s service accounts"
+ gen3 awsrole create "karpenter-controller-role-$vpc_name" karpenter "karpenter" || true
+ gen3 awsrole sa-annotate karpenter "karpenter-controller-role-$vpc_name" karpenter || true
+ # Have to delete SA because helm chart will create the SA and there will be a conflict
+
+ gen3_log_info "Have to delete SA because helm chart will create the SA and there will be a conflict"
+ #g3kubectl delete sa karpenter -n karpenter
+
+ gen3_log_info "aws iam put-role-policy --role-name "karpenter-controller-role-$vpc_name" --policy-document file://$XDG_RUNTIME_DIR/controller-policy.json --policy-name "karpenter-controller-policy" 1>&2 || true"
+ aws iam put-role-policy --role-name "karpenter-controller-role-$vpc_name" --policy-document file://$XDG_RUNTIME_DIR/controller-policy.json --policy-name "karpenter-controller-policy" 1>&2 || true
+ gen3_log_info "Need to tag the subnets/sg's so that karpenter can discover them automatically"
+ # Need to tag the subnets/sg's so that karpenter can discover them automatically
+ subnets=$(aws ec2 describe-subnets --filter 'Name=tag:Environment,Values='$vpc_name'' 'Name=tag:Name,Values=eks_private_*' --query 'Subnets[].SubnetId' --output text)
+    # Append secondary CIDR block subnets to be tagged as well; if none are found, nothing is appended to the list
+ subnets+=" $(aws ec2 describe-subnets --filter 'Name=tag:Environment,Values='$vpc_name'' 'Name=tag:Name,Values=eks_secondary_cidr_subnet_*' --query 'Subnets[].SubnetId' --output text)"
+ security_groups=$(aws ec2 describe-security-groups --filter 'Name=tag:Name,Values='$vpc_name'-nodes-sg,ssh_eks_'$vpc_name'' --query 'SecurityGroups[].GroupId' --output text) || true
+ security_groups_jupyter=$(aws ec2 describe-security-groups --filter 'Name=tag:Name,Values='$vpc_name'-nodes-sg-jupyter,ssh_eks_'$vpc_name'-nodepool-jupyter' --query 'SecurityGroups[].GroupId' --output text) || true
+ security_groups_workflow=$(aws ec2 describe-security-groups --filter 'Name=tag:Name,Values='$vpc_name'-nodes-sg-workflow,ssh_eks_'$vpc_name'-nodepool-workflow' --query 'SecurityGroups[].GroupId' --output text) || true
+ cluster_endpoint="$(aws eks describe-cluster --name ${vpc_name} --query "cluster.endpoint" --output text)"
+
+ aws ec2 create-tags --tags "Key=karpenter.sh/discovery,Value=${vpc_name}" --resources ${security_groups} || true
+ aws ec2 create-tags --tags "Key=karpenter.sh/discovery,Value=${vpc_name}" --resources ${subnets} || true
+ aws ec2 create-tags --tags "Key=karpenter.sh/discovery,Value=${vpc_name}-jupyter" --resources ${security_groups_jupyter} || true
+ aws ec2 create-tags --tags "Key=karpenter.sh/discovery,Value=${vpc_name}-workflow" --resources ${security_groups_workflow} || true
+ echo '{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Condition": {
+ "ArnLike": {
+ "aws:SourceArn": "arn:aws:eks:us-east-1:'$(aws sts get-caller-identity --output text --query "Account")':fargateprofile/'$(echo $vpc_name)'/*"
+ }
+ },
+ "Principal": {
+ "Service": "eks-fargate-pods.amazonaws.com"
+ },
+ "Action": "sts:AssumeRole"
+ }
+ ]
+ }' > $XDG_RUNTIME_DIR/fargate-policy.json
+ aws iam create-role --role-name AmazonEKSFargatePodExecutionRole-${vpc_name} --assume-role-policy-document file://"$XDG_RUNTIME_DIR/fargate-policy.json" || true
+ aws iam attach-role-policy --policy-arn arn:aws:iam::aws:policy/AmazonEKSFargatePodExecutionRolePolicy --role-name AmazonEKSFargatePodExecutionRole-${vpc_name} || true
+ # Wait for IAM changes to take effect
+ sleep 15
+ aws eks create-fargate-profile --fargate-profile-name karpenter-profile --cluster-name $vpc_name --pod-execution-role-arn arn:aws:iam::$(aws sts get-caller-identity --output text --query "Account"):role/AmazonEKSFargatePodExecutionRole-${vpc_name} --subnets $subnets --selectors '{"namespace": "karpenter"}' || true
+ gen3_log_info "Installing karpenter using helm"
+ helm template karpenter-crd oci://public.ecr.aws/karpenter/karpenter-crd --version ${karpenter} --namespace "karpenter" | g3kubectl apply -f -
+ helm upgrade --install karpenter oci://public.ecr.aws/karpenter/karpenter --version ${karpenter} --namespace karpenter --wait \
+ --set settings.aws.defaultInstanceProfile=${vpc_name}_EKS_workers \
+ --set settings.aws.clusterEndpoint="${cluster_endpoint}" \
+ --set settings.aws.clusterName=${vpc_name} \
+ --set settings.aws.interruptionQueueName="${queue_name}" \
+ --set serviceAccount.name=karpenter \
+ --set serviceAccount.create=false \
+ --set controller.env[0].name=AWS_REGION \
+ --set controller.env[0].value=us-east-1 \
+ --set controller.resources.requests.memory="2Gi" \
+ --set controller.resources.requests.cpu="2" \
+ --set controller.resources.limits.memory="2Gi" \
+ --set controller.resources.limits.cpu="2"
+ fi
+ gen3 awsrole sa-annotate karpenter "karpenter-controller-role-$vpc_name" karpenter
+ gen3_log_info "Remove cluster-autoscaler"
+ gen3 kube-setup-autoscaler --remove
+ # Ensure that fluentd is updated if karpenter is deployed to prevent containerd logging issues
+ gen3 kube-setup-fluentd --force
+ gen3_update_karpenter_configs
+ fi
+}
+
+gen3_update_karpenter_configs() {
+  # deploy node templates and provisioners unless custom ones are provided in the manifest repo
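+  # custom templates, when present, are read from the manifest repo, e.g. (illustrative path):
+  #   <manifest-repo>/<hostname>/manifests/karpenter/*.yaml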
+ if [[ -d $(g3k_manifest_init)/$(g3k_hostname)/manifests/karpenter ]]; then
+ gen3_log_info "karpenter manifest found, skipping node template and provisioner deployment"
+ # apply each manifest in the karpenter folder
+ for manifest in $(g3k_manifest_init)/$(g3k_hostname)/manifests/karpenter/*.yaml; do
+ g3k_kv_filter $manifest VPC_NAME ${vpc_name} | g3kubectl apply -f -
+ done
+ else
+ gen3_log_info "Adding node templates for karpenter"
+ g3k_kv_filter ${GEN3_HOME}/kube/services/karpenter/nodeTemplateDefault.yaml VPC_NAME ${vpc_name} | g3kubectl apply -f -
+ g3k_kv_filter ${GEN3_HOME}/kube/services/karpenter/nodeTemplateJupyter.yaml VPC_NAME ${vpc_name} | g3kubectl apply -f -
+ g3k_kv_filter ${GEN3_HOME}/kube/services/karpenter/nodeTemplateWorkflow.yaml VPC_NAME ${vpc_name} | g3kubectl apply -f -
+ if [[ $ARM ]]; then
+ gen3_log_info "Deploy binfmt daemonset so the emulation tools run on arm nodes"
+ # Deploy binfmt daemonset so the emulation tools run on arm nodes
+ g3kubectl apply -f ${GEN3_HOME}/kube/services/karpenter/binfmt.yaml
+ g3kubectl apply -f ${GEN3_HOME}/kube/services/karpenter/provisionerArm.yaml
+ else
+ g3kubectl apply -f ${GEN3_HOME}/kube/services/karpenter/provisionerDefault.yaml
+ fi
+ if [[ $GPU ]]; then
+ g3kubectl apply -f ${GEN3_HOME}/kube/services/karpenter/provisionerGPU.yaml
+ g3kubectl apply -f ${GEN3_HOME}/kube/services/karpenter/provisionerGPUShared.yaml
+ g3kubectl apply -f ${GEN3_HOME}/kube/services/karpenter/nodeTemplateGPU.yaml
+ helm repo add nvdp https://nvidia.github.io/k8s-device-plugin
+ helm repo update
+ helm upgrade -i nvdp nvdp/nvidia-device-plugin \
+ --namespace nvidia-device-plugin \
+ --create-namespace -f ${GEN3_HOME}/kube/services/karpenter/nvdp.yaml
+ fi
+ g3kubectl apply -f ${GEN3_HOME}/kube/services/karpenter/provisionerJupyter.yaml
+ g3kubectl apply -f ${GEN3_HOME}/kube/services/karpenter/provisionerWorkflow.yaml
+ fi
+}
+
+gen3_create_karpenter_sqs_eventbridge() {
+ local queue_name="$(gen3 api safe-name karpenter-sqs)"
+ local eventbridge_rule_name="karpenter-eventbridge-${vpc_name}"
+ gen3 sqs create-queue-if-not-exist karpenter-sqs >> "$XDG_RUNTIME_DIR/sqs-${vpc_name}.json"
+ local queue_url=$(cat "$XDG_RUNTIME_DIR/sqs-${vpc_name}.json" | jq -r '.url')
+ local queue_arn=$(cat "$XDG_RUNTIME_DIR/sqs-${vpc_name}.json" | jq -r '.arn')
+ # Create eventbridge rules
+ aws events put-rule --name "Karpenter-${vpc_name}-SpotInterruptionRule" --event-pattern '{"source": ["aws.ec2"], "detail-type": ["EC2 Spot Instance Interruption Warning"]}' 2> /dev/null
+ aws events put-rule --name "Karpenter-${vpc_name}-RebalanceRule" --event-pattern '{"source": ["aws.ec2"], "detail-type": ["EC2 Instance Rebalance Recommendation"]}' 2> /dev/null
+ aws events put-rule --name "Karpenter-${vpc_name}-ScheduledChangeRule" --event-pattern '{"source": ["aws.health"], "detail-type": ["AWS Health Event"]}' 2> /dev/null
+ aws events put-rule --name "Karpenter-${vpc_name}-InstanceStateChangeRule" --event-pattern '{"source": ["aws.ec2"], "detail-type": ["EC2 Instance State-change Notification"]}' 2> /dev/null
+ # Add SQS as a target for the eventbridge rules
+ aws events put-targets --rule "Karpenter-${vpc_name}-SpotInterruptionRule" --targets "Id"="1","Arn"="${queue_arn}" 2> /dev/null || true
+ aws events put-targets --rule "Karpenter-${vpc_name}-RebalanceRule" --targets "Id"="1","Arn"="${queue_arn}" 2> /dev/null || true
+ aws events put-targets --rule "Karpenter-${vpc_name}-ScheduledChangeRule" --targets "Id"="1","Arn"="${queue_arn}" 2> /dev/null || true
+ aws events put-targets --rule "Karpenter-${vpc_name}-InstanceStateChangeRule" --targets "Id"="1","Arn"="${queue_arn}" 2> /dev/null || true
+ aws sqs set-queue-attributes --queue-url "${queue_url}" --attributes "Policy"="$(aws sqs get-queue-attributes --queue-url "${queue_url}" --attribute-names "Policy" --query "Attributes.Policy" --output text | jq -r '.Statement += [{"Sid": "AllowKarpenter", "Effect": "Allow", "Principal": {"Service": ["sqs.amazonaws.com","events.amazonaws.com"]}, "Action": "sqs:SendMessage", "Resource": "'${queue_arn}'"}]')" 2> /dev/null || true
+ #g3k_kv_filter ${GEN3_HOME}/kube/services/karpenter/karpenter-global-settings.yaml SQS_NAME ${queue_name} | g3kubectl apply -f -
+}
+
+gen3_remove_karpenter() {
+ aws iam delete-role-policy --role-name "karpenter-controller-role-$vpc_name" --policy-name "karpenter-controller-policy" 1>&2 || true
+ aws iam delete-role --role-name "karpenter-controller-role-$vpc_name"
+ helm uninstall karpenter -n karpenter
+ g3kubectl delete namespace karpenter
+ gen3 kube-setup-autoscaler
+}
+
+#---------- main
+
+if [[ -z "$GEN3_SOURCE_ONLY" ]]; then
+ # Support sourcing this file for test suite
+ command="$1"
+ shift
+ case "$command" in
+ "deploy")
+ for flag in $@; do
+ if [[ $# -gt 0 ]]; then
+ flag="$1"
+ shift
+ fi
+ case "$flag" in
+ "--force")
+ FORCE=true
+ ;;
+ "--arm")
+ ARM=true
+ ;;
+ esac
+ done
+ gen3_deploy_karpenter
+ ;;
+ "remove")
+ gen3_remove_karpenter
+ ;;
+ "update")
+ gen3_update_karpenter_configs
+ ;;
+ *)
+ gen3_deploy_karpenter
+ ;;
+ esac
+fi
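+# Usage (commands and flags as parsed above):
+#   gen3 kube-setup-karpenter deploy [--force] [--arm]
+#   gen3 kube-setup-karpenter update
+#   gen3 kube-setup-karpenter remove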
diff --git a/gen3/bin/kube-setup-kubecost.sh b/gen3/bin/kube-setup-kubecost.sh
index 07487672d..2166f051c 100644
--- a/gen3/bin/kube-setup-kubecost.sh
+++ b/gen3/bin/kube-setup-kubecost.sh
@@ -12,10 +12,11 @@ gen3_setup_kubecost_infrastructure() {
gen3 workon default "${vpc_name}__kubecost"
gen3 cd
echo "vpc_name=\"$vpc_name\"" > config.tfvars
- if [[ $deployment == "slave" ]]; then
- echo "cur_s3_bucket=\"$s3Bucket\"" >> config.tfvars
- elif [[ $deployment == "master" ]]; then
- echo "slave_account_id=\"$slaveAccountId\"" >> config.tfvars
+ if [[ ! -z "$curBucketCreated" ]]; then
+ echo "cur_s3_bucket=\"$curBucket\"" >> config.tfvars
+ fi
+ if [[ ! -z "$reportBucketCreated" ]]; then
+ echo "reports_s3_bucket=\"$reportBucket\"" >> config.tfvars
fi
gen3 tfplan 2>&1
gen3 tfapply 2>&1
@@ -38,22 +39,19 @@ gen3_setup_kubecost_service_account() {
aws iam attach-role-policy --role-name "$roleName" --policy-arn "arn:aws:iam::$accountID:policy/$vpc_name-Kubecost-CUR-policy" 1>&2
#gen3 awsrole sa-annotate "$saName" "$roleName" "kubecost"
kubectl delete sa -n kubecost $saName
+ # SA for reports
reportsRoleName="$vpc_name-opencost-report-role"
reportsSaName="reports-service-account"
gen3 awsrole create "$reportsRoleName" "$reportsSaName" "kubecost" || return 1
- aws iam attach-role-policy --role-name "$reportsRoleName" --policy-arn "arn:aws:iam::$accountID:policy/$vpc_name-Kubecost-Thanos-policy" 1>&2
+ aws iam attach-role-policy --role-name "$reportsRoleName" --policy-arn "arn:aws:iam::$accountID:policy/$vpc_name-Kubecost-report-policy" 1>&2
gen3 awsrole sa-annotate "$reportsSaName" "$reportsRoleName" "kubecost"
}
gen3_delete_kubecost_service_account() {
aws iam detach-role-policy --role-name "${vpc_name}-kubecost-user" --policy-arn "arn:aws:iam::$accountID:policy/$vpc_name-Kubecost-CUR-policy" 1>&2
- aws iam detach-role-policy --role-name "${vpc_name}-thanos-user" --policy-arn "arn:aws:iam::$accountID:policy/$vpc_name-Kubecost-Thanos-policy" 1>&2
gen3 workon default "${vpc_name}-kubecost-user_role"
gen3 tfplan --destroy 2>&1
gen3 tfapply 2>&1
- gen3 workon default "${vpc_name}-thanos-user_role"
- gen3 tfplan --destroy 2>&1
- gen3 tfapply 2>&1
}
gen3_delete_kubecost() {
@@ -63,68 +61,40 @@ gen3_delete_kubecost() {
}
gen3_kubecost_create_alb() {
- kubectl apply -f "${GEN3_HOME}/kube/services/kubecost-${deployment}/kubecost-alb.yaml" -n kubecost
+ kubectl apply -f "${GEN3_HOME}/kube/services/kubecost/kubecost-alb.yaml" -n kubecost
}
gen3_setup_kubecost() {
kubectl create namespace kubecost || true
+ # If s3 bucket not supplied, create a new one
+ if [[ -z $curBucket ]]; then
+ curBucket="$vpc_name-kubecost-bucket"
+ fi
+ # If report bucket not supplied, use the same as cur bucket
+ if [[ -z $reportBucket ]]; then
+ reportBucket=$curBucket
+ fi
gen3_setup_kubecost_infrastructure
+ aws ec2 create-spot-datafeed-subscription --bucket $curBucket --prefix spot-feed || true
# Change the SA permissions based on slave/master/standalone
if [[ -z $(kubectl get sa -n kubecost | grep $vpc_name-kubecost-user) ]]; then
gen3_setup_kubecost_service_account
fi
- # If master setup and s3 bucket not supplied, set terraform master s3 bucket name for thanos secret
- if [[ -z $s3Bucket ]]; then
- s3Bucket="$vpc_name-kubecost-bucket"
- fi
if (! helm status kubecost -n kubecost > /dev/null 2>&1 ) || [[ ! -z "$FORCE" ]]; then
- if [[ $deployment == "slave" ]]; then
- valuesFile="$XDG_RUNTIME_DIR/values_$$.yaml"
- valuesTemplate="${GEN3_HOME}/kube/services/kubecost-slave/values.yaml"
- thanosValuesFile="$XDG_RUNTIME_DIR/object-store.yaml"
- thanosValuesTemplate="${GEN3_HOME}/kube/services/kubecost-slave/object-store.yaml"
- thanosValues="${GEN3_HOME}/kube/services/kubecost-slave/values-thanos.yaml"
- g3k_kv_filter $valuesTemplate KUBECOST_TOKEN "${kubecostToken}" KUBECOST_SA "eks.amazonaws.com/role-arn: arn:aws:iam::$accountID:role/gen3_service/$roleName" THANOS_SA "$thanosSaName" ATHENA_BUCKET "s3://$s3Bucket" ATHENA_DATABASE "athenacurcfn_$vpc_name" ATHENA_TABLE "${vpc_name}_cur" AWS_ACCOUNT_ID "$accountID" AWS_REGION "$awsRegion" > $valuesFile
- elif [[ $deployment == "master" ]]; then
- valuesFile="$XDG_RUNTIME_DIR/values_$$.yaml"
- valuesTemplate="${GEN3_HOME}/kube/services/kubecost-master/values.yaml"
- thanosValuesFile="$XDG_RUNTIME_DIR/object-store.yaml"
- thanosValuesTemplate="${GEN3_HOME}/kube/services/kubecost-master/object-store.yaml"
- g3k_kv_filter $valuesTemplate KUBECOST_TOKEN "${kubecostToken}" KUBECOST_SA "eks.amazonaws.com/role-arn: arn:aws:iam::$accountID:role/gen3_service/$roleName" THANOS_SA "$thanosSaName" ATHENA_BUCKET "s3://$s3Bucket" ATHENA_DATABASE "athenacurcfn_$vpc_name" ATHENA_TABLE "${vpc_name}_cur" AWS_ACCOUNT_ID "$accountID" AWS_REGION "$awsRegion" > $valuesFile
- gen3_kubecost_create_alb
- else
- valuesFile="$XDG_RUNTIME_DIR/values_$$.yaml"
- valuesTemplate="${GEN3_HOME}/kube/services/kubecost-standalone/values.yaml"
- #thanosValuesFile="$XDG_RUNTIME_DIR/object-store.yaml"
- #thanosValuesTemplate="${GEN3_HOME}/kube/services/kubecost-standalone/object-store.yaml"
- g3k_kv_filter $valuesTemplate KUBECOST_TOKEN "${kubecostToken}" KUBECOST_SA "eks.amazonaws.com/role-arn: arn:aws:iam::$accountID:role/gen3_service/$roleName" THANOS_SA "$thanosSaName" ATHENA_BUCKET "s3://$s3Bucket" ATHENA_DATABASE "athenacurcfn_$vpc_name" ATHENA_TABLE "${vpc_name}_cur" AWS_ACCOUNT_ID "$accountID" AWS_REGION "$awsRegion" > $valuesFile
- gen3_kubecost_create_alb
- fi
- #kubectl delete secret -n kubecost kubecost-thanos || true
- #kubectl delete secret -n kubecost thanos || true
- #g3k_kv_filter $thanosValuesTemplate AWS_REGION $awsRegion KUBECOST_S3_BUCKET $s3Bucket > $thanosValuesFile
- #kubectl create secret generic kubecost-thanos -n kubecost --from-file=$thanosValuesFile
- #kubectl create secret generic thanos -n kubecost --from-file=$thanosValuesFile
- # Need to setup thanos config
- gen3 kube-setup-certs
- gen3 kube-setup-prometheus
- g3kubectl delete secret -n kubecost cert-kubecost-cost-analyzer || true
- g3kubectl create secret generic "cert-kubecost-cost-analyzer" "--from-file=tls.crt=$(gen3_secrets_folder)/credentials/kubecost-cost-analyzer-service.crt" "--from-file=tls.key=$(gen3_secrets_folder)/credentials/kubecost-cost-analyzer-service.key" -n kubecost || true
+ valuesFile="$XDG_RUNTIME_DIR/values_$$.yaml"
+ valuesTemplate="${GEN3_HOME}/kube/services/kubecost/values.yaml"
+ g3k_kv_filter $valuesTemplate KUBECOST_SA "eks.amazonaws.com/role-arn: arn:aws:iam::$accountID:role/gen3_service/$roleName" ATHENA_BUCKET "$curBucket" ATHENA_DATABASE "athenacurcfn_$vpc_name" ATHENA_TABLE "${vpc_name}_cur" AWS_ACCOUNT_ID "$accountID" AWS_REGION "$awsRegion" > $valuesFile
helm repo add kubecost https://kubecost.github.io/cost-analyzer/ --force-update 2> >(grep -v 'This is insecure' >&2)
helm repo update 2> >(grep -v 'This is insecure' >&2)
- if [[ -z $disablePrometheus ]]; then
- helm upgrade --install kubecost kubecost/cost-analyzer -n kubecost -f ${valuesFile}
- else
- helm upgrade --install kubecost kubecost/cost-analyzer -n kubecost -f ${valuesFile}
- fi
+ helm upgrade --install kubecost kubecost/cost-analyzer -n kubecost -f ${valuesFile}
else
gen3_log_info "kube-setup-kubecost exiting - kubecost already deployed, use --force true to redeploy"
fi
- gen3_setup_reports_cronjob
+ gen3_kubecost_create_alb
}
-gen3_setup_reports_cronjob {
- gen3 job cron opencost-report '0 0 * * 0' BUCKET_NAME $s3Bucket
+gen3_setup_reports_cronjob() {
+ gen3 job cron opencost-report-argo '0 0 * * 0' BUCKET_NAME $reportBucket
}
if [[ -z "$GEN3_SOURCE_ONLY" ]]; then
@@ -135,170 +105,29 @@ if [[ -z "$GEN3_SOURCE_ONLY" ]]; then
command="$1"
shift
case "$command" in
- "master")
- deployment="master"
- subcommand=""
- if [[ $# -gt 0 ]]; then
- subcommand="$1"
- shift
- fi
- case "$subcommand" in
- "create")
- for flag in $@; do
- if [[ $# -gt 0 ]]; then
- flag="$1"
- shift
- fi
- case "$flag" in
- "--slave-account-id")
- slaveAccountId="$1"
- ;;
- "--kubecost-token")
- kubecostToken="$1"
- ;;
- "--force")
- if [[ $(echo $1 | tr '[:upper:]' '[:lower:]') == "true" ]]; then
- FORCE=true
- fi
- ;;
- "--disable-prometheus")
- if [[ $(echo $1 | tr '[:upper:]' '[:lower:]') == "true" ]]; then
- disablePrometheus=true
- fi
- ;;
- "--prometheus-namespace")
- prometheusNamespace="$1"
- ;;
- "--prometheus-service")
- prometheusService="$1"
- ;;
- esac
- done
- if [[ -z $slaveAccountId || -z $kubecostToken ]]; then
- gen3_log_err "Please ensure you set the required flags."
- exit 1
- fi
- if [[ $disablePrometheus == true && -z $prometheusNamespace && -z $prometheusService ]]; then
- gen3_log_err "If you disable prometheus, set the flags for the local prometheus namespace and service name."
- exit 1
- fi
- gen3_setup_kubecost "$@"
- ;;
- "alb")
- gen3_kubecost_create_alb
- ;;
- *)
- gen3_log_err "gen3_logs" "invalid history subcommand $subcommand - try: gen3 help kube-setup-kubecost"
- ;;
- esac
- ;;
- "slave")
- deployment="slave"
- subcommand=""
- if [[ $# -gt 0 ]]; then
- subcommand="$1"
- shift
- fi
- case "$subcommand" in
- "create")
- for flag in $@; do
- if [[ $# -gt 0 ]]; then
- flag="$1"
- shift
- fi
- case "$flag" in
- "--s3-bucket")
- s3Bucket="$1"
- ;;
- "--kubecost-token")
- kubecostToken="$1"
- ;;
- "--force")
- if [[ $(echo $1 | tr '[:upper:]' '[:lower:]') == "true" ]]; then
- FORCE=true
- fi
- ;;
- "--disable-prometheus")
- if [[ $(echo $1 | tr '[:upper:]' '[:lower:]') == "true" ]]; then
- disablePrometheus=true
- fi
- ;;
- "--prometheus-namespace")
- prometheusNamespace="$1"
- ;;
- "--prometheus-service")
- prometheusService="$1"
- ;;
- esac
- done
- if [[ -z $s3Bucket || -z $kubecostToken ]]; then
- gen3_log_err "Please ensure you set the required flags."
- exit 1
- fi
- if [[ $disablePrometheus == true && -z $prometheusNamespace && -z $prometheusService ]]; then
- gen3_log_err "If you disable prometheus, set the flags for the local prometheus namespace and service name."
- exit 1
- fi
- gen3_setup_kubecost "$@"
- ;;
- *)
- gen3_log_err "gen3_logs" "invalid history subcommand $subcommand - try: gen3 help kube-setup-kubecost"
- ;;
- esac
- ;;
- "standalone")
- deployment="standalone"
- subcommand=""
- if [[ $# -gt 0 ]]; then
- subcommand="$1"
- shift
- fi
- case "$subcommand" in
- "create")
- for flag in $@; do
- if [[ $# -gt 0 ]]; then
- flag="$1"
- shift
+ "create")
+ for flag in $@; do
+ if [[ $# -gt 0 ]]; then
+ flag="$1"
+ shift
+ fi
+ case "$flag" in
+ "--force")
+ if [[ $(echo $1 | tr '[:upper:]' '[:lower:]') == "true" ]]; then
+ FORCE=true
fi
- case "$flag" in
- "--kubecost-token")
- kubecostToken="$1"
- ;;
- "--force")
- if [[ $(echo $1 | tr '[:upper:]' '[:lower:]') == "true" ]]; then
- FORCE=true
- fi
- ;;
- "--disable-prometheus")
- if [[ $(echo $1 | tr '[:upper:]' '[:lower:]') == "true" ]]; then
- disablePrometheus=true
- fi
- ;;
- "--prometheus-namespace")
- prometheusNamespace="$1"
- ;;
- "--prometheus-service")
- prometheusService="$1"
- ;;
- esac
- done
- if [[ -z $kubecostToken ]]; then
- gen3_log_err "Please ensure you set the required flags."
- exit 1
- fi
- if [[ $disablePrometheus == true && -z $prometheusNamespace && -z $prometheusService ]]; then
- gen3_log_err "If you disable prometheus, set the flags for the local prometheus namespace and service name."
- exit 1
- fi
- gen3_setup_kubecost "$@"
- ;;
- "alb")
- gen3_kubecost_create_alb
- ;;
- *)
- gen3_log_err "gen3_logs" "invalid history subcommand $subcommand - try: gen3 help kube-setup-kubecost"
- ;;
- esac
+ ;;
+ "--cur-bucket")
+ curBucket="$1"
+ curBucketCreated=true
+ ;;
+ "--report-bucket")
+ reportBucket="$1"
+ reportBucketCreated=true
+ ;;
+ esac
+ done
+ gen3_setup_kubecost "$@"
;;
"cronjob")
subcommand=""
@@ -314,13 +143,13 @@ if [[ -z "$GEN3_SOURCE_ONLY" ]]; then
shift
fi
case "$flag" in
- "--s3-bucket")
- s3Bucket="$1"
+ "--report-bucket")
+ reportBucket="$1"
;;
esac
done
- if [[ -z $s3Bucket ]]; then
- gen3_log_err "Please ensure you set the s3Bucket for setting up cronjob without full opencost deployment."
+ if [[ -z $reportBucket ]]; then
+ gen3_log_err "Please ensure you set the reportBucket for setting up cronjob without full opencost deployment."
exit 1
fi
gen3_setup_reports_cronjob
@@ -334,8 +163,7 @@ if [[ -z "$GEN3_SOURCE_ONLY" ]]; then
gen3_delete_kubecost
;;
*)
- gen3_log_err "gen3_logs" "invalid command $command"
- gen3_kubecost_help
+ gen3_setup_kubecost "$@"
;;
esac
fi
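+# Typical invocation after this refactor (bucket names are illustrative):
+#   gen3 kube-setup-kubecost create --cur-bucket my-cur-bucket --report-bucket my-report-bucket --force true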
diff --git a/gen3/bin/kube-setup-manifestservice.sh b/gen3/bin/kube-setup-manifestservice.sh
index a3df01d42..ccbde4691 100644
--- a/gen3/bin/kube-setup-manifestservice.sh
+++ b/gen3/bin/kube-setup-manifestservice.sh
@@ -12,28 +12,23 @@ gen3_load "gen3/gen3setup"
hostname="$(gen3 api hostname)"
bucketname="manifest-${hostname//./-}"
-username="manifest-bot-${hostname//./-}"
+username="manifestbot-${hostname//./-}"
mkdir -p $(gen3_secrets_folder)/g3auto/manifestservice
credsFile="$(gen3_secrets_folder)/g3auto/manifestservice/config.json"
+gen3_log_info "kube-setup-manifestservice" "setting up manifest-service resources"
+gen3 s3 create "$bucketname" || true
+gen3 awsrole create ${username} manifestservice-sa || true
+gen3 s3 attach-bucket-policy "$bucketname" --read-write --role-name ${username} || true
if (! (g3kubectl describe secret manifestservice-g3auto 2> /dev/null | grep config.js > /dev/null 2>&1)) \
- && [[ (! -f "$credsFile") && -z "$JENKINS_HOME" ]];
+ && [[ (! -f "$credsFile") && -z "$JENKINS_HOME" ]];
then
- gen3_log_info "kube-setup-manifestservice" "setting up manifest-service resources"
- gen3 s3 create "$bucketname"
- gen3 awsuser create ${username}
- gen3 s3 attach-bucket-policy "$bucketname" --read-write --user-name ${username}
gen3_log_info "initializing manifestservice config.json"
- user=$(gen3 secrets decode ${username}-g3auto awsusercreds.json)
- key_id=$(jq -r .id <<< $user)
- access_key=$(jq -r .secret <<< $user)
cat - > "$credsFile" < /dev/null 2>&1
+ secrets=$(g3kubectl exec -c fence $(gen3 pod fence) -- fence-create client-create --client metadata-delete-expired-objects-job --grant-types client_credentials | tail -1)
+ if [[ ! $secrets =~ (\'(.*)\', \'(.*)\') ]]; then
+ gen3_log_err "kube-setup-metadata-delete-expired-objects-job" "Failed generating oidc client: $secrets"
+ return 1
+ fi
+ fi
+ local client_id="${BASH_REMATCH[2]}"
+ local client_secret="${BASH_REMATCH[3]}"
+
+ gen3_log_info "create metadata-delete-expired-objects secret"
+ mkdir -m 0700 -p "$(gen3_secrets_folder)/g3auto/metadata-delete-expired-objects"
+
+ cat - > "$secretsFolder/config.json" < /dev/null 2>&1; then
diff --git a/gen3/bin/kube-setup-metrics.sh b/gen3/bin/kube-setup-metrics.sh
index ca287197a..139c9679c 100644
--- a/gen3/bin/kube-setup-metrics.sh
+++ b/gen3/bin/kube-setup-metrics.sh
@@ -17,7 +17,7 @@
source "${GEN3_HOME}/gen3/lib/utils.sh"
gen3_load "gen3/gen3setup"
-DESIRED_VERSION=0.3.7
+DESIRED_VERSION=0.6.2
CURRENT_VERSION=$(kubectl get deployment -n kube-system metrics-server -o json | jq -r .spec.template.spec.containers[0].image | awk -F :v '{print $2}')
gen3_metrics_deploy() {
@@ -47,4 +47,4 @@ case "$command" in
gen3_log_err "unknown option: $command"
gen3 help kube-setup-metrics
;;
-esac
\ No newline at end of file
+esac
diff --git a/gen3/bin/kube-setup-ohdsi.sh b/gen3/bin/kube-setup-ohdsi.sh
index d586570db..3d8165547 100644
--- a/gen3/bin/kube-setup-ohdsi.sh
+++ b/gen3/bin/kube-setup-ohdsi.sh
@@ -14,13 +14,8 @@ new_client() {
local secrets=$(g3kubectl exec -c fence $(gen3 pod fence) -- fence-create client-create --client atlas --urls https://${atlas_hostname}/WebAPI/user/oauth/callback?client_name=OidcClient --username atlas --allowed-scopes openid profile email user | tail -1)
# secrets looks like ('CLIENT_ID', 'CLIENT_SECRET')
if [[ ! $secrets =~ (\'(.*)\', \'(.*)\') ]]; then
- # try delete client
- g3kubectl exec -c fence $(gen3 pod fence) -- fence-create client-delete --client atlas > /dev/null 2>&1
- secrets=$(g3kubectl exec -c fence $(gen3 pod fence) -- fence-create client-create --client atlas --urls https://${atlas_hostname}/WebAPI/user/oauth/callback?client_name=OidcClient --username atlas --allowed-scopes openid profile email user | tail -1)
- if [[ ! $secrets =~ (\'(.*)\', \'(.*)\') ]]; then
- gen3_log_err "kube-setup-ohdsi" "Failed generating oidc client for atlas: $secrets"
- return 1
- fi
+ gen3_log_err "kube-setup-ohdsi" "Failed generating oidc client for atlas: $secrets"
+ return 1
fi
local FENCE_CLIENT_ID="${BASH_REMATCH[2]}"
local FENCE_CLIENT_SECRET="${BASH_REMATCH[3]}"
@@ -87,6 +82,8 @@ setup_secrets() {
export DB_HOST=$(jq -r ".db_host" <<< "$dbcreds")
export FENCE_URL="https://${hostname}/user/user"
+ # get arborist_url from manifest.json:
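+  # e.g. (illustrative) manifest.json: "global": { "arborist_url": "http://arborist-service", ... }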
+ export ARBORIST_URL=$(g3k_manifest_lookup .global.arborist_url)
export FENCE_METADATA_URL="https://${hostname}/.well-known/openid-configuration"
export FENCE_CLIENT_ID=$(jq -r ".FENCE_CLIENT_ID" <<< "$appcreds")
export FENCE_CLIENT_SECRET=$(jq -r ".FENCE_CLIENT_SECRET" <<< "$appcreds")
diff --git a/gen3/bin/kube-setup-pdb.sh b/gen3/bin/kube-setup-pdb.sh
new file mode 100644
index 000000000..e29b2e1e6
--- /dev/null
+++ b/gen3/bin/kube-setup-pdb.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+#
+# Apply pod disruption budgets to the core services of the commons
+#
+
+source "${GEN3_HOME}/gen3/lib/utils.sh"
+gen3_load "gen3/gen3setup"
+
+serverVersion="$(g3kubectl version -o json | jq -r '.serverVersion.major + "." + .serverVersion.minor' | head -c4)"
+echo "Server version $serverVersion"
+if [ "$serverVersion" \< "1.21" ]; then
+ gen3_log_info "kube-setup-pdb" "K8s server version $serverVersion does not support pod disruption budgets. Server must be version 1.21 or higher"
+ exit 0
+fi
+
+deployments=$(kubectl get deployments | awk '{print $1}' | tail -n +2)
+
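+# PDBs are only applied when the manifest turns them on, e.g. (illustrative snippet):
+#   "global": { "pdb": "on", ... }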
+if [[ "$(g3k_manifest_lookup .global.pdb)" == "on" ]]; then
+ for deployment in $deployments
+ do
+ replicas=$(kubectl get deployment $deployment -o=jsonpath='{.spec.replicas}')
+ if [[ "$replicas" -gt "1" ]]; then
+ echo "There were $replicas replicas"
+ service=$(echo "$deployment" | awk -F '-' '{print $1}')
+ echo "We are on the $service service"
+ filePath="${GEN3_HOME}/kube/services/pod-disruption-budget/${service}.yaml"
+ if [[ -f "$filePath" ]]; then
+ g3kubectl apply -f "$filePath"
+ else
+ echo "No PDB file found for service $service"
+ fi
+ else
+ echo "Skipping PDB for deployment $deployment because it has only 1 replica"
+ fi
+ done
+else
+  echo "You need to set .global.pdb to 'on' in manifest.json"
+fi
\ No newline at end of file
diff --git a/gen3/bin/kube-setup-pelicanjob.sh b/gen3/bin/kube-setup-pelicanjob.sh
index 930985cb8..907b9f045 100644
--- a/gen3/bin/kube-setup-pelicanjob.sh
+++ b/gen3/bin/kube-setup-pelicanjob.sh
@@ -24,14 +24,36 @@ if ! g3kubectl describe secret pelicanservice-g3auto | grep config.json > /dev/n
user=$(gen3 secrets decode $awsuser-g3auto awsusercreds.json)
key_id=$(jq -r .id <<< $user)
access_key=$(jq -r .secret <<< $user)
+
+ # setup fence OIDC client with client_credentials grant for access to MDS API
+ hostname=$(gen3 api hostname)
+ gen3_log_info "kube-setup-sower-jobs" "creating fence oidc client for $hostname"
+ secrets=$(g3kubectl exec -c fence $(gen3 pod fence) -- fence-create client-create --client pelican-export-job --grant-types client_credentials | tail -1)
+ # secrets looks like ('CLIENT_ID', 'CLIENT_SECRET')
+ if [[ ! $secrets =~ (\'(.*)\', \'(.*)\') ]]; then
+ # try delete client
+ g3kubectl exec -c fence $(gen3 pod fence) -- fence-create client-delete --client pelican-export-job > /dev/null 2>&1
+ secrets=$(g3kubectl exec -c fence $(gen3 pod fence) -- fence-create client-create --client pelican-export-job --grant-types client_credentials | tail -1)
+ if [[ ! $secrets =~ (\'(.*)\', \'(.*)\') ]]; then
+      gen3_log_err "kube-setup-pelicanjob" "Failed generating oidc client: $secrets"
+ return 1
+ fi
+ fi
+ pelican_export_client_id="${BASH_REMATCH[2]}"
+ pelican_export_client_secret="${BASH_REMATCH[3]}"
+
  cat - > "$credsFile" <<EOM
diff --git a/gen3/bin/kube-setup-prometheus.sh b/gen3/bin/kube-setup-prometheus.sh
-function create_grafana_secrets()
-{
-  if ! g3kubectl get secrets/grafana-admin > /dev/null 2>&1; then
- credsFile=$(mktemp -p "$XDG_RUNTIME_DIR" "creds.json_XXXXXX")
- creds="$(base64 /dev/urandom | head -c 12)"
- if [[ "$creds" != null ]]; then
- echo ${creds} >> "$credsFile"
- g3kubectl create secret generic grafana-admin "--from-file=credentials=${credsFile}"
- rm -f ${credsFile}
- else
- echo "WARNING: there was an error creating the secrets for grafana"
- fi
- fi
-}
+# function create_grafana_secrets()
+# {
+# if ! g3kubectl get secrets/grafana-admin > /dev/null 2>&1; then
+# credsFile=$(mktemp -p "$XDG_RUNTIME_DIR" "creds.json_XXXXXX")
+# creds="$(base64 /dev/urandom | head -c 12)"
+# if [[ "$creds" != null ]]; then
+# echo ${creds} >> "$credsFile"
+# g3kubectl create secret generic grafana-admin "--from-file=credentials=${credsFile}"
+# rm -f ${credsFile}
+# else
+# echo "WARNING: there was an error creating the secrets for grafana"
+# fi
+# fi
+# }
function deploy_prometheus()
{
@@ -60,7 +64,7 @@ function deploy_prometheus()
if (! g3kubectl get namespace monitoring> /dev/null 2>&1);
then
g3kubectl create namespace monitoring
- g3kubectl label namespace namespace app=prometheus
+ g3kubectl label namespace monitoring app=prometheus
fi
if (g3kubectl --namespace=monitoring get deployment prometheus-server > /dev/null 2>&1);
@@ -71,44 +75,48 @@ function deploy_prometheus()
if ! g3kubectl get storageclass prometheus > /dev/null 2>&1; then
g3kubectl apply -f "${GEN3_HOME}/kube/services/monitoring/prometheus-storageclass.yaml"
fi
- deploy_thanos
+ if [ "$argocd" = true ]; then
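+    # with the argocd flag, hand kube-prometheus-stack to ArgoCD via an Application manifest instead of running helm directly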
+ g3kubectl apply -f "$GEN3_HOME/kube/services/monitoring/prometheus-application.yaml" --namespace=argocd
+ else
gen3 arun helm upgrade --install prometheus prometheus-community/kube-prometheus-stack --namespace monitoring -f "${GEN3_HOME}/kube/services/monitoring/values.yaml"
+ fi
+ deploy_thanos
else
gen3_log_info "Prometheus is already installed, use --force to try redeploying"
fi
}
-function deploy_grafana()
-{
- helm_repository
- if (! g3kubectl get namespace grafana > /dev/null 2>&1);
- then
- g3kubectl create namespace grafana
- g3kubectl label namespace grafana app=grafana
- fi
+# function deploy_grafana()
+# {
+# helm_repository
+# if (! g3kubectl get namespace grafana > /dev/null 2>&1);
+# then
+# g3kubectl create namespace grafana
+# g3kubectl label namespace grafana app=grafana
+# fi
- #create_grafana_secrets
- TMPGRAFANAVALUES=$(mktemp -p "$XDG_RUNTIME_DIR" "grafana.json_XXXXXX")
- ADMINPASS=$(g3kubectl get secrets grafana-admin -o json |jq .data.credentials -r |base64 -d)
- yq '.adminPassword = "'${ADMINPASS}'"' "${GEN3_HOME}/kube/services/monitoring/grafana-values.yaml" --yaml-output > ${TMPGRAFANAVALUES}
- # curl -o grafana-values.yaml https://raw.githubusercontent.com/helm/charts/master/stable/grafana/values.yaml
+# #create_grafana_secrets
+# TMPGRAFANAVALUES=$(mktemp -p "$XDG_RUNTIME_DIR" "grafana.json_XXXXXX")
+# ADMINPASS=$(g3kubectl get secrets grafana-admin -o json |jq .data.credentials -r |base64 -d)
+# yq '.adminPassword = "'${ADMINPASS}'"' "${GEN3_HOME}/kube/services/monitoring/grafana-values.yaml" --yaml-output > ${TMPGRAFANAVALUES}
+# # curl -o grafana-values.yaml https://raw.githubusercontent.com/helm/charts/master/stable/grafana/values.yaml
- if (! g3kubectl --namespace=grafana get deployment grafana > /dev/null 2>&1) || [[ "$1" == "--force" ]]; then
- if ( g3kubectl --namespace=grafana get deployment grafana > /dev/null 2>&1);
- then
- delete_grafana
- fi
+# if (! g3kubectl --namespace=grafana get deployment grafana > /dev/null 2>&1) || [[ "$1" == "--force" ]]; then
+# if ( g3kubectl --namespace=grafana get deployment grafana > /dev/null 2>&1);
+# then
+# delete_grafana
+# fi
- local HOSTNAME
- HOSTNAME=$(gen3 api hostname)
+# local HOSTNAME
+# HOSTNAME=$(gen3 api hostname)
- g3k_kv_filter "${TMPGRAFANAVALUES}" DOMAIN ${HOSTNAME} | gen3 arun helm upgrade --install grafana stable/grafana --namespace grafana -f -
- gen3 kube-setup-revproxy
- else
- echo "Grafana is already installed, use --force to try redeploying"
- fi
-}
+# g3k_kv_filter "${TMPGRAFANAVALUES}" DOMAIN ${HOSTNAME} | gen3 arun helm upgrade --install grafana stable/grafana --namespace grafana -f -
+# gen3 kube-setup-revproxy
+# else
+# echo "Grafana is already installed, use --force to try redeploying"
+# fi
+# }
function deploy_thanos() {
if [[ -z $vpc_name ]]; then
@@ -137,11 +145,11 @@ case "$command" in
prometheus)
deploy_prometheus "$@"
;;
- grafana)
- deploy_grafana "$@"
- ;;
+ # grafana)
+ # deploy_grafana "$@"
+ # ;;
*)
deploy_prometheus "$@"
- deploy_grafana "$@"
+ # deploy_grafana "$@"
;;
esac
diff --git a/gen3/bin/kube-setup-requestor.sh b/gen3/bin/kube-setup-requestor.sh
index 7bcc1e644..b4b8ae0e2 100644
--- a/gen3/bin/kube-setup-requestor.sh
+++ b/gen3/bin/kube-setup-requestor.sh
@@ -20,7 +20,7 @@ setup_database() {
fi
# Setup config file that requestor consumes
local secretsFolder="$(gen3_secrets_folder)/g3auto/requestor"
- if [[ ! -f "$secretsFolder/requestor-config.yaml" || ! -f "$secretsFolder/base64Authz.txt" ]]; then
+ if [[ ! -f "$secretsFolder/requestor-config.yaml" ]]; then
if [[ ! -f "$secretsFolder/dbcreds.json" ]]; then
if ! gen3 db setup requestor; then
gen3_log_err "Failed setting up database for requestor service"
@@ -44,8 +44,6 @@ DB_USER: $(jq -r .db_username < "$secretsFolder/dbcreds.json")
DB_PASSWORD: $(jq -r .db_password < "$secretsFolder/dbcreds.json")
DB_DATABASE: $(jq -r .db_database < "$secretsFolder/dbcreds.json")
EOM
- # make it easy for nginx to get the Authorization header ...
- # echo -n "gateway:$password" | base64 > "$secretsFolder/base64Authz.txt"
fi
gen3 secrets sync 'setup requestor-g3auto secrets'
}
diff --git a/gen3/bin/kube-setup-revproxy.sh b/gen3/bin/kube-setup-revproxy.sh
index 0b6ee74d7..fd30b478b 100644
--- a/gen3/bin/kube-setup-revproxy.sh
+++ b/gen3/bin/kube-setup-revproxy.sh
@@ -111,50 +111,59 @@ for name in $(g3kubectl get services -o json | jq -r '.items[] | .metadata.name'
fi
done
-if g3kubectl get namespace argo > /dev/null 2>&1;
+
+if g3k_manifest_lookup .argo.argo_server_service_url 2> /dev/null; then
+ argo_server_service_url=$(g3k_manifest_lookup .argo.argo_server_service_url)
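+  # g3k_kv_filter substitutes the SERVICE_URL placeholder in argo-server.conf with the manifest value;
+  # the namespaced copy under /tmp is then added to the revproxy conf file list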
+ g3k_kv_filter "${scriptDir}/gen3.nginx.conf/argo-server.conf" SERVICE_URL "${argo_server_service_url}" > /tmp/argo-server-with-url$(gen3 db namespace).conf
+ filePath="/tmp/argo-server-with-url$(gen3 db namespace).conf"
+ if [[ -f "$filePath" ]]; then
+ confFileList+=("--from-file" "$filePath")
+ fi
+fi
+
+if g3kubectl get namespace argocd > /dev/null 2>&1;
then
- for argo in $(g3kubectl get services -n argo -o jsonpath='{.items[*].metadata.name}');
- do
- filePath="$scriptDir/gen3.nginx.conf/${argo}.conf"
+ filePath="$scriptDir/gen3.nginx.conf/argocd-server.conf"
if [[ -f "$filePath" ]]; then
confFileList+=("--from-file" "$filePath")
fi
- done
fi
-if [[ $current_namespace == "default" ]];
+if g3kubectl get namespace monitoring > /dev/null 2>&1;
then
- if g3kubectl get namespace prometheus > /dev/null 2>&1;
- then
- for prometheus in $(g3kubectl get services -n prometheus -o jsonpath='{.items[*].metadata.name}');
- do
- filePath="$scriptDir/gen3.nginx.conf/${prometheus}.conf"
- if [[ -f "$filePath" ]]; then
- confFileList+=("--from-file" "$filePath")
- fi
- done
- fi
+ filePath="$scriptDir/gen3.nginx.conf/prometheus-server.conf"
+ if [[ -f "$filePath" ]]; then
+ confFileList+=("--from-file" "$filePath")
+ fi
fi
-#echo "${confFileList[@]}" $BASHPID
-if [[ $current_namespace == "default" ]]; then
- if g3kubectl get namespace grafana > /dev/null 2>&1; then
- for grafana in $(g3kubectl get services -n grafana -o jsonpath='{.items[*].metadata.name}');
- do
- filePath="$scriptDir/gen3.nginx.conf/${grafana}.conf"
- touch "${XDG_RUNTIME_DIR}/${grafana}.conf"
- tmpCredsFile="${XDG_RUNTIME_DIR}/${grafana}.conf"
- adminPass=$(g3kubectl get secrets grafana-admin -o json |jq .data.credentials -r |base64 -d)
- adminCred=$(echo -n "admin:${adminPass}" | base64 --wrap=0)
- sed "s/CREDS/${adminCred}/" ${filePath} > ${tmpCredsFile}
- if [[ -f "${tmpCredsFile}" ]]; then
- confFileList+=("--from-file" "${tmpCredsFile}")
- fi
- #rm -f ${tmpCredsFile}
- done
- fi
+if g3kubectl get namespace kubecost > /dev/null 2>&1;
+then
+ filePath="$scriptDir/gen3.nginx.conf/kubecost-service.conf"
+ if [[ -f "$filePath" ]]; then
+ confFileList+=("--from-file" "$filePath")
+ fi
fi
+# #echo "${confFileList[@]}" $BASHPID
+# if [[ $current_namespace == "default" ]]; then
+# if g3kubectl get namespace grafana > /dev/null 2>&1; then
+# for grafana in $(g3kubectl get services -n grafana -o jsonpath='{.items[*].metadata.name}');
+# do
+# filePath="$scriptDir/gen3.nginx.conf/${grafana}.conf"
+# touch "${XDG_RUNTIME_DIR}/${grafana}.conf"
+# tmpCredsFile="${XDG_RUNTIME_DIR}/${grafana}.conf"
+# adminPass=$(g3kubectl get secrets grafana-admin -o json |jq .data.credentials -r |base64 -d)
+# adminCred=$(echo -n "admin:${adminPass}" | base64 --wrap=0)
+# sed "s/CREDS/${adminCred}/" ${filePath} > ${tmpCredsFile}
+# if [[ -f "${tmpCredsFile}" ]]; then
+# confFileList+=("--from-file" "${tmpCredsFile}")
+# fi
+# #rm -f ${tmpCredsFile}
+# done
+# fi
+# fi
+
if g3k_manifest_lookup .global.document_url > /dev/null 2>&1; then
documentUrl="$(g3k_manifest_lookup .global.document_url)"
if [[ "$documentUrl" != null ]]; then
diff --git a/gen3/bin/kube-setup-roles.sh b/gen3/bin/kube-setup-roles.sh
index 040aaca05..aba7bf402 100644
--- a/gen3/bin/kube-setup-roles.sh
+++ b/gen3/bin/kube-setup-roles.sh
@@ -12,6 +12,8 @@ gen3_load "gen3/gen3setup"
g3kubectl patch serviceaccount default -p 'automountServiceAccountToken: false'
g3kubectl patch serviceaccount --namespace "$(gen3 jupyter j-namespace)" default -p 'automountServiceAccountToken: false' > /dev/null || true
+namespace="$(gen3 api namespace)"
+
# Don't do this in a Jenkins job
if [[ -z "$JENKINS_HOME" ]]; then
if ! g3kubectl get serviceaccounts/useryaml-job > /dev/null 2>&1; then
@@ -29,10 +31,10 @@ if [[ -z "$JENKINS_HOME" ]]; then
roleName="$(gen3 api safe-name gitops)"
gen3 awsrole create "$roleName" gitops-sa
# do this here, since we added the new role to this binding
- g3kubectl apply -f "${GEN3_HOME}/kube/services/jenkins/rolebinding-devops.yaml"
+ g3k_kv_filter ${GEN3_HOME}/kube/services/jenkins/rolebinding-devops.yaml CURRENT_NAMESPACE "$namespace"|g3kubectl apply -f -
fi
if ! g3kubectl get rolebindings/devops-binding > /dev/null 2>&1; then
- g3kubectl apply -f "${GEN3_HOME}/kube/services/jenkins/rolebinding-devops.yaml"
+ g3k_kv_filter ${GEN3_HOME}/kube/services/jenkins/rolebinding-devops.yaml CURRENT_NAMESPACE "$namespace"|g3kubectl apply -f -
fi
ctx="$(g3kubectl config current-context)"
diff --git a/gen3/bin/kube-setup-sheepdog.sh b/gen3/bin/kube-setup-sheepdog.sh
index b72d36690..7eec86def 100644
--- a/gen3/bin/kube-setup-sheepdog.sh
+++ b/gen3/bin/kube-setup-sheepdog.sh
@@ -42,8 +42,8 @@ if [[ -z "$JENKINS_HOME" && -f "$(gen3_secrets_folder)/creds.json" ]]; then
if gen3_time_since postgres_checkup is 120; then
# Grant permissions to peregrine
sqlList=(
- "GRANT SELECT ON ALL TABLES IN SCHEMA public TO $peregrine_db_user;"
- "ALTER DEFAULT PRIVILEGES GRANT SELECT ON TABLES TO $peregrine_db_user;"
+ "GRANT SELECT ON ALL TABLES IN SCHEMA public TO \"$peregrine_db_user\";"
+ "ALTER DEFAULT PRIVILEGES GRANT SELECT ON TABLES TO \"$peregrine_db_user\";"
);
for sql in "${sqlList[@]}"; do
gen3_log_info "Running: $sql"
diff --git a/gen3/bin/kube-setup-system-services.sh b/gen3/bin/kube-setup-system-services.sh
index 7a75a33f8..c26a04cb5 100644
--- a/gen3/bin/kube-setup-system-services.sh
+++ b/gen3/bin/kube-setup-system-services.sh
@@ -16,10 +16,10 @@
source "${GEN3_HOME}/gen3/lib/utils.sh"
gen3_load "gen3/gen3setup"
-kubeproxy=${kubeproxy:-1.16.13}
-coredns=${coredns:-1.6.6}
+kubeproxy=${kubeproxy:-1.24.7}
+coredns=${coredns:-1.8.7}
kubednsautoscaler=${kubednsautoscaler:-1.8.6}
-cni=${cni:-1.11.0}
+cni=${cni:-1.14.1}
calico=${calico:-1.7.8}
@@ -31,7 +31,7 @@ while [ $# -gt 0 ]; do
shift
done
-kube_proxy_image="602401143452.dkr.ecr.us-east-1.amazonaws.com/eks/kube-proxy:v${kubeproxy}-eksbuild.1"
+kube_proxy_image="602401143452.dkr.ecr.us-east-1.amazonaws.com/eks/kube-proxy:v${kubeproxy}-eksbuild.2"
coredns_image="602401143452.dkr.ecr.us-east-1.amazonaws.com/eks/coredns:v${coredns}"
kubednsautoscaler_image="k8s.gcr.io/cpa/cluster-proportional-autoscaler:${kubednsautoscaler}"
cni_image="https://raw.githubusercontent.com/aws/amazon-vpc-cni-k8s/v${cni}/config/master/aws-k8s-cni.yaml"
@@ -39,7 +39,7 @@ calico_yaml="https://raw.githubusercontent.com/aws/amazon-vpc-cni-k8s/v${calico}
g3kubectl set image daemonset.apps/kube-proxy -n kube-system kube-proxy=${kube_proxy_image}
g3kubectl set image --namespace kube-system deployment.apps/coredns coredns=${coredns_image}
-g3k_kv_filter "${GEN3_HOME}/kube/services/kube-dns-autoscaler/dns-horizontal-autoscaler.yaml" SERVICE "coredns" IMAGE "$kubednsautoscaler_image" | g3kubectl apply -f -
+#g3k_kv_filter "${GEN3_HOME}/kube/services/kube-dns-autoscaler/dns-horizontal-autoscaler.yaml" SERVICE "coredns" IMAGE "$kubednsautoscaler_image" | g3kubectl apply -f -
g3kubectl apply -f ${cni_image}
g3kubectl apply -f ${calico_yaml}
diff --git a/gen3/bin/kube-setup-workvm.sh b/gen3/bin/kube-setup-workvm.sh
index 4b47be0fa..53424f89d 100644
--- a/gen3/bin/kube-setup-workvm.sh
+++ b/gen3/bin/kube-setup-workvm.sh
@@ -6,6 +6,7 @@
#
s3_bucket="${s3_bucket:-${2:-unknown}}"
+export DEBIAN_FRONTEND=noninteractive
# Make it easy to run this directly ...
_setup_workvm_dir="$(dirname -- "${BASH_SOURCE:-$0}")"
@@ -29,15 +30,16 @@ fi
if sudo -n true > /dev/null 2>&1 && [[ $(uname -s) == "Linux" ]]; then
# -E passes through *_proxy environment
- sudo -E apt-get update
- sudo -E apt-get install -y git jq pwgen python-dev python-pip unzip python3-dev python3-pip python3-venv
+ gen3_log_info "Install git jq pwgen unzip python3-dev python3-pip python3-venv libpq-dev apt-transport-https ca-certificates gnupg apt-utils"
+ sudo -E apt-get update -qq
+ sudo -E apt-get install -qq -y git jq pwgen unzip python3-dev python3-pip python3-venv libpq-dev apt-transport-https ca-certificates gnupg apt-utils > /dev/null
( # subshell
# install aws cli v2 - https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2-linux.html
# increase min version periodically - see https://github.com/aws/aws-cli/blob/v2/CHANGELOG.rst
update_awscli() {
local version="0.0.0"
- if aws --version; then
+ if aws --version > /dev/null 2>&1; then
version="$(aws --version | awk '{ print $1 }' | awk -F / '{ print $2 }')"
fi
if semver_ge "$version" "2.7.0"; then
@@ -46,6 +48,7 @@ if sudo -n true > /dev/null 2>&1 && [[ $(uname -s) == "Linux" ]]; then
fi
# update to latest version
( # subshell
+ gen3_log_info "Installing aws cli"
export DEBIAN_FRONTEND=noninteractive
if [[ -f /usr/local/bin/aws ]] && ! semver_ge "$version" "2.7.0"; then
sudo rm /usr/local/bin/aws
@@ -54,13 +57,14 @@ if sudo -n true > /dev/null 2>&1 && [[ $(uname -s) == "Linux" ]]; then
temp_dir="aws_install-$(date +%m%d%Y)"
mkdir $temp_dir
cd $temp_dir
- curl -o awscli.zip https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip
- unzip awscli.zip
+          curl -s -o awscli.zip "https://awscli.amazonaws.com/awscli-exe-linux-$(uname -m 2>/dev/null || echo x86_64).zip"
+ unzip -qq awscli.zip
if semver_ge "$version" "2.7.0"; then
yes | sudo ./aws/install --update
else
yes | sudo ./aws/install
fi
+ aws --version
# cleanup
cd $HOME
rm -rf $temp_dir
@@ -70,18 +74,23 @@ if sudo -n true > /dev/null 2>&1 && [[ $(uname -s) == "Linux" ]]; then
update_awscli
)
- sudo -E XDG_CACHE_HOME=/var/cache python3 -m pip install --upgrade pip
+ gen3_log_info "Upgrading pip.."
+ sudo -E XDG_CACHE_HOME=/var/cache python3 -m pip install -q --upgrade pip
+
+ gen3_log_info "Installing jinja2 via pip"
+
# jinja2 needed by render_creds.py
- sudo -E XDG_CACHE_HOME=/var/cache python3 -m pip install jinja2
- # yq === jq for yaml
- sudo -E XDG_CACHE_HOME=/var/cache python3 -m pip install yq
+ sudo -E XDG_CACHE_HOME=/var/cache python3 -m pip install -q jinja2 yq --ignore-installed
+
# install nodejs
- if ! which node > /dev/null 2>&1; then
- curl -sL https://deb.nodesource.com/setup_12.x | sudo -E bash -
- sudo -E apt-get update
- sudo -E apt-get install -y nodejs
- fi
+ gen3_log_info "Install node js 16"
+ curl -fsSL https://deb.nodesource.com/setup_16.x | sudo -E bash - > /dev/null
+ sudo apt install -qq -y nodejs > /dev/null
+
+ gen3_log_info "Node: Version $(node --version)"
+
+
if [[ ! -f /etc/apt/sources.list.d/google-cloud-sdk.list ]]; then
# might need to uninstall gcloud installed from ubuntu repo
if which gcloud > /dev/null 2>&1; then
@@ -89,7 +98,8 @@ if sudo -n true > /dev/null 2>&1 && [[ $(uname -s) == "Linux" ]]; then
fi
fi
if ! which psql > /dev/null 2>&1; then
- (
+ (
+ gen3_log_info "Install postgres-client"
# use the postgres dpkg server
# https://www.postgresql.org/download/linux/ubuntu/
DISTRO="$(lsb_release -c -s)" # ex - xenial
@@ -97,32 +107,31 @@ if sudo -n true > /dev/null 2>&1 && [[ $(uname -s) == "Linux" ]]; then
echo "deb http://apt.postgresql.org/pub/repos/apt/ ${DISTRO}-pgdg main" | sudo tee /etc/apt/sources.list.d/pgdg.list
fi
wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add -
- sudo -E apt-get update
- sudo -E apt-get install -y postgresql-client-13
+ sudo -E apt-get -qq update
+ sudo -E apt-get install -qq -y postgresql-client-13 > /dev/null
)
fi
- # gen3sdk currently requires this
- sudo -E apt-get install -y libpq-dev
+
if ! which gcloud > /dev/null 2>&1; then
(
- export CLOUD_SDK_REPO="cloud-sdk-$(lsb_release -c -s)"
- sudo -E bash -c "echo 'deb https://packages.cloud.google.com/apt $CLOUD_SDK_REPO main' > /etc/apt/sources.list.d/google-cloud-sdk.list"
- curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo -E apt-key add -
- sudo -E apt-get update
- sudo -E apt-get install -y google-cloud-sdk \
- google-cloud-sdk-cbt
+ gen3_log_info "Install google cloud cli"
+ sudo -E bash -c "echo 'deb https://packages.cloud.google.com/apt cloud-sdk main' | sudo tee -a /etc/apt/sources.list.d/google-cloud-sdk.list"
+ curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo -E apt-key add -
+ curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key --keyring /usr/share/keyrings/cloud.google.gpg add -
+ sudo -E apt-get update -qq
+ sudo -E apt-get install -qq -y google-cloud-sdk \
+ google-cloud-sdk-cbt > /dev/null
+
)
+
fi
- k8s_server_version=$(kubectl version --short | awk -F[v.] '/Server/ {print $3"."$4}')
- if [[ ! -z "${k8s_server_version// }" ]]; then
- # install kubectl
- install_version=$(apt-cache madison kubectl | awk '$3 ~ /'$k8s_server_version'/ {print $3}'| head -n 1)
- gen3_log_info "Installing kubectl version $install_version"
- sudo -E apt-get install -y kubectl=$install_version --allow-downgrades
- else
- # install kubectl
- sudo -E apt-get install -y kubectl=1.21.14-00 --allow-downgrades
+ if ! which kubectl > /dev/null 2>&1; then
+ gen3_log_info "Installing kubectl"
+ sudo -E apt-get install -qq -y kubectl > /dev/null
+ else
+ gen3_log_info "Upgrading kubectl"
+ sudo -E apt-get upgrade -qq -y kubectl > /dev/null
fi
mkdir -p ~/.config
@@ -130,26 +139,29 @@ if sudo -n true > /dev/null 2>&1 && [[ $(uname -s) == "Linux" ]]; then
( # in a subshell - install terraform
install_terraform() {
- curl -o "${XDG_RUNTIME_DIR}/terraform.zip" https://releases.hashicorp.com/terraform/0.11.15/terraform_0.11.15_linux_amd64.zip
+ gen3_log_info "Installing terraform 0.11"
+ curl -s -o "${XDG_RUNTIME_DIR}/terraform.zip" https://releases.hashicorp.com/terraform/0.11.15/terraform_0.11.15_linux_amd64.zip
sudo /bin/rm -rf /usr/local/bin/terraform > /dev/null 2>&1 || true
- sudo unzip "${XDG_RUNTIME_DIR}/terraform.zip" -d /usr/local/bin;
+ sudo unzip -qq "${XDG_RUNTIME_DIR}/terraform.zip" -d /usr/local/bin;
/bin/rm "${XDG_RUNTIME_DIR}/terraform.zip"
}
install_terraform12() {
+ gen3_log_info "Installing terraform 0.12"
mkdir "${XDG_RUNTIME_DIR}/t12"
- curl -o "${XDG_RUNTIME_DIR}/t12/terraform12.zip" https://releases.hashicorp.com/terraform/0.12.31/terraform_0.12.31_linux_amd64.zip
+ curl -s -o "${XDG_RUNTIME_DIR}/t12/terraform12.zip" https://releases.hashicorp.com/terraform/0.12.31/terraform_0.12.31_linux_amd64.zip
sudo /bin/rm -rf /usr/local/bin/terraform12 > /dev/null 2>&1 || true
- unzip "${XDG_RUNTIME_DIR}/t12/terraform12.zip" -d "${XDG_RUNTIME_DIR}/t12";
+ unzip -qq "${XDG_RUNTIME_DIR}/t12/terraform12.zip" -d "${XDG_RUNTIME_DIR}/t12";
sudo cp "${XDG_RUNTIME_DIR}/t12/terraform" "/usr/local/bin/terraform12"
/bin/rm -rf "${XDG_RUNTIME_DIR}/t12"
}
install_terraform1.2() {
+ gen3_log_info "Installing terraform 1.2"
mkdir "${XDG_RUNTIME_DIR}/t1.2"
- curl -o "${XDG_RUNTIME_DIR}/t1.2/terraform1.2.zip" https://releases.hashicorp.com/terraform/1.2.3/terraform_1.2.3_linux_amd64.zip
+ curl -s -o "${XDG_RUNTIME_DIR}/t1.2/terraform1.2.zip" https://releases.hashicorp.com/terraform/1.2.3/terraform_1.2.3_linux_amd64.zip
sudo /bin/rm -rf /usr/local/bin/terraform1.2 > /dev/null 2>&1 || true
- unzip "${XDG_RUNTIME_DIR}/t1.2/terraform1.2.zip" -d "${XDG_RUNTIME_DIR}/t1.2";
+ unzip -qq "${XDG_RUNTIME_DIR}/t1.2/terraform1.2.zip" -d "${XDG_RUNTIME_DIR}/t1.2";
sudo cp "${XDG_RUNTIME_DIR}/t1.2/terraform" "/usr/local/bin/terraform1.2"
/bin/rm -rf "${XDG_RUNTIME_DIR}/t1.2"
}
@@ -213,8 +225,9 @@ EOM
)
fi
if ! which packer > /dev/null 2>&1; then
- curl -o "${XDG_RUNTIME_DIR}/packer.zip" https://releases.hashicorp.com/packer/1.5.1/packer_1.5.1_linux_amd64.zip
- sudo unzip "${XDG_RUNTIME_DIR}/packer.zip" -d /usr/local/bin
+ gen3_log_info "Installing packer"
+ curl -s -o "${XDG_RUNTIME_DIR}/packer.zip" https://releases.hashicorp.com/packer/1.5.1/packer_1.5.1_linux_amd64.zip
+ sudo unzip -qq "${XDG_RUNTIME_DIR}/packer.zip" -d /usr/local/bin
/bin/rm "${XDG_RUNTIME_DIR}/packer.zip"
fi
# https://docs.aws.amazon.com/eks/latest/userguide/install-aws-iam-authenticator.html
@@ -222,23 +235,16 @@ EOM
(
gen3_log_info "installing aws-iam-authenticator"
cd /usr/local/bin
- sudo curl -o aws-iam-authenticator https://amazon-eks.s3.us-west-2.amazonaws.com/1.18.8/2020-09-18/bin/linux/amd64/aws-iam-authenticator
+ sudo curl -s -o aws-iam-authenticator https://amazon-eks.s3.us-west-2.amazonaws.com/1.18.8/2020-09-18/bin/linux/amd64/aws-iam-authenticator
sudo chmod a+rx ./aws-iam-authenticator
- sudo rm /usr/local/bin/heptio-authenticator-aws || true
- # link heptio-authenticator-aws for backward compatability with old scripts
- sudo ln -s /usr/local/bin/aws-iam-authenticator heptio-authenticator-aws
)
fi
( # in a subshell install helm
install_helm() {
- helm_release_URL="https://get.helm.sh/helm-v3.4.0-linux-amd64.tar.gz"
- curl -o "${XDG_RUNTIME_DIR}/helm.tar.gz" ${helm_release_URL}
+ helm_release_URL="https://get.helm.sh/helm-v3.9.4-linux-amd64.tar.gz"
+ curl -s -o "${XDG_RUNTIME_DIR}/helm.tar.gz" ${helm_release_URL}
tar xf "${XDG_RUNTIME_DIR}/helm.tar.gz" -C ${XDG_RUNTIME_DIR}
sudo mv -f "${XDG_RUNTIME_DIR}/linux-amd64/helm" /usr/local/bin
-
- # helm3 has no default repo, need to add it manually
- helm repo add stable https://charts.helm.sh/stable --force-update
- helm repo update
}
migrate_helm() {
@@ -354,3 +360,5 @@ fi
npm install || true
fi
)
+
+source ${WORKSPACE}/.${RC_FILE}
\ No newline at end of file
diff --git a/gen3/bin/kube-setup-wts.sh b/gen3/bin/kube-setup-wts.sh
index b807da2d5..ad8211d03 100644
--- a/gen3/bin/kube-setup-wts.sh
+++ b/gen3/bin/kube-setup-wts.sh
@@ -42,6 +42,8 @@ new_client() {
"oidc_client_id": "$client_id",
"oidc_client_secret": "$client_secret",
+ "aggregate_endpoint_allowlist": ["/authz/mapping"],
+
"external_oidc": []
}
EOM
diff --git a/gen3/bin/kube-wait4-pods.sh b/gen3/bin/kube-wait4-pods.sh
index 2da695e4c..03068b50d 100644
--- a/gen3/bin/kube-wait4-pods.sh
+++ b/gen3/bin/kube-wait4-pods.sh
@@ -11,20 +11,20 @@ help() {
in the 'waiting' state.
Use to wait till all launched services
are up and healthy before performing some action.
- Waits for up to 15 minutes. Non-zero exit code
- if 15 minutes expires, and pods are still not ready.
+ Waits for up to 60 minutes. Non-zero exit code
+ if 60 minutes expires, and pods are still not ready.
EOM
return 0
}
-MAX_RETRIES=${1:-180}
+MAX_RETRIES=${1:-360}
IS_K8S_RESET="${2:-false}"
if [[ ! "$MAX_RETRIES" =~ ^[0-9]+$ ]];
then
gen3_log_err "ignoring invalid retry count: $1"
- MAX_RETRIES=180
+ MAX_RETRIES=360
fi
if [[ ! "$IS_K8S_RESET" =~ ^(true$|false$) ]];
diff --git a/gen3/bin/migrate-to-vpc-cni.sh b/gen3/bin/migrate-to-vpc-cni.sh
new file mode 100644
index 000000000..510d9ebef
--- /dev/null
+++ b/gen3/bin/migrate-to-vpc-cni.sh
@@ -0,0 +1,138 @@
+#!/bin/bash
+
+source "${GEN3_HOME}/gen3/lib/utils.sh"
+gen3_load "gen3/gen3setup"
+
+#Get the K8s NS
+ctx="$(g3kubectl config current-context)"
+ctxNamespace="$(g3kubectl config view -ojson | jq -r ".contexts | map(select(.name==\"$ctx\")) | .[0] | .context.namespace")"
+
+# Set the cluster name variable
+CLUSTER_NAME=`gen3 api environment`
+
+# Check if in default ns
+if [[ ("$ctxNamespace" != "default" && "$ctxNamespace" != "null") ]]; then
+ gen3_log_err "Namespace must be default"
+ exit 1
+fi
+
+# Cd into Cloud-automation repo and pull the latest from master
+gen3_log_info "Pulling the latest from Cloud-Auto"
+cd /home/$CLUSTER_NAME/cloud-automation || { gen3_log_err "Cloud-automation repo not found"; exit 1; }
+#### Change to master
+git checkout master || { gen3_log_err "Failed to checkout master branch"; exit 1; }
+git pull || { gen3_log_err "Failed to pull from the repository"; exit 1; }
+
+# Update the Karpenter Node Template
+gen3_log_info "Apply new Karpenter Node Template"
+if [[ -d $(g3k_manifest_init)/$(g3k_hostname)/manifests/karpenter ]]; then
+  gen3_log_info "Karpenter is configured in the manifest. Open a cdismanifest PR and add this line to the AWS node templates: https://github.com/uc-cdis/cloud-automation/blob/master/kube/services/karpenter/nodeTemplateDefault.yaml#L40"
+ while true; do
+ read -p "Have you updated your manifest? (yes/no): " yn
+ case $yn in
+ [Yy]* )
+ gen3_log_info "Proceeding with Karpenter deployment..."
+ gen3 kube-setup-karpenter deploy --force || { gen3_log_err "kube-setup-karpenter failed"; exit 1; }
+ break
+ ;;
+ [Nn]* )
+ gen3_log_info "Please update the cdismanifest before proceeding."
+ exit 1
+ ;;
+ * )
+ gen3_log_info "Please answer yes or no."
+ ;;
+ esac
+ done
+else
+ gen3 kube-setup-karpenter deploy --force || { gen3_log_err "kube-setup-karpenter failed"; exit 1; }
+fi
+
+# Cordon all the nodes before running gen3 roll all"
+gen3_log_info "Cordoning all nodes"
+kubectl get nodes --no-headers -o custom-columns=":metadata.name" | grep -v '^fargate' | xargs -I{} kubectl cordon {}
+
+# Run a "gen3 roll all" so all nodes use the new mounted BPF File System
+gen3_log_info "Cycling all the nodes by running gen3 roll all"
+gen3 roll all --fast || exit 1
+
+# Confirm that all nodes have been rotated
+while true; do
+ read -p "Roll all complete. Have all cordoned nodes been rotated? (yes/no): " yn
+ case $yn in
+ [Yy]* )
+ gen3_log_info "Continuing with script..."
+ break
+ ;;
+ [Nn]* )
+      gen3_log_info "Please drain any remaining nodes with 'kubectl drain <node-name> --ignore-daemonsets --delete-emptydir-data'"
+ ;;
+ * )
+ gen3_log_info "Please answer yes or no."
+ ;;
+ esac
+done
+
+
+# Delete all existing network policies
+gen3_log_info "Deleting networkpolicies"
+kubectl delete networkpolicies --all
+
+# Delete all Calico related resources from the “kube-system” namespace
+gen3_log_info "Deleting all Calico related resources"
+kubectl get deployments -n kube-system | grep calico | awk '{print $1}' | xargs kubectl delete deployment -n kube-system
+kubectl get daemonsets -n kube-system | grep calico | awk '{print $1}' | xargs kubectl delete daemonset -n kube-system
+kubectl get services -n kube-system | grep calico | awk '{print $1}' | xargs kubectl delete service -n kube-system
+kubectl get replicasets -n kube-system | grep calico | awk '{print $1}' | xargs kubectl delete replicaset -n kube-system
+
+# Backup the current VPC CNI configuration in case of rollback
+gen3_log_info "Backing up current VPC CNI Configuration..."
+kubectl get daemonset aws-node -n kube-system -o yaml > aws-k8s-cni-old.yaml || { gen3_log_err "Error backing up VPC CNI configuration"; exit 1; }
+
+# Check to ensure we are not using an AWS plugin to manage the VPC CNI Plugin
+if aws eks describe-addon --cluster-name "$CLUSTER_NAME" --addon-name vpc-cni --query addon.addonVersion --output text 2>/dev/null; then
+ gen3_log_err "Error: VPC CNI Plugin is managed by AWS. Please log into the AWS UI and delete the VPC CNI Plugin in Amazon EKS, then re-run this script."
+ exit 1
+else
+ gen3_log_info "No managed VPC CNI Plugin found, proceeding with the script."
+fi
+
+# Apply the new VPC CNI Version
+gen3_log_info "Applying new version of VPC CNI"
+g3kubectl apply -f https://raw.githubusercontent.com/aws/amazon-vpc-cni-k8s/v1.14.1/config/master/aws-k8s-cni.yaml || { gen3_log_err "Failed to apply new VPC CNI version"; exit 1; }
+
+# Check the version to make sure it updated
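+# "kubectl describe" prints a line like "Image: .../amazon-k8s-cni:v1.14.1", so the third ':'-separated field is the image tag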
+NEW_VERSION=$(kubectl describe daemonset aws-node --namespace kube-system | grep amazon-k8s-cni: | cut -d : -f 3)
+gen3_log_info "Current version of aws-k8s-cni is: $NEW_VERSION"
+if [ "$NEW_VERSION" != "v1.14.1" ]; then
+ gen3_log_info "The version of aws-k8s-cni has not been updated correctly."
+ exit 1
+fi
+
+# Edit the amazon-vpc-cni configmap to enable network policy controller
+gen3_log_info "Enabling NetworkPolicies in VPC CNI Configmap"
+kubectl patch configmap -n kube-system amazon-vpc-cni --type merge -p '{"data":{"enable-network-policy-controller":"true"}}' || { gen3_log_err "Configmap patch failed"; exit 1; }
+
+# Edit the aws-node daemonset
+gen3_log_info "Enabling NetworkPolicies in aws-node Daemonset"
+kubectl patch daemonset aws-node -n kube-system --type=json -p='[{"op": "add", "path": "/spec/template/spec/containers/1/args", "value": ["--enable-network-policy=true", "--enable-ipv6=false", "--enable-cloudwatch-logs=false", "--metrics-bind-addr=:8162", "--health-probe-bind-addr=:8163"]}]' || { gen3_log_err "Daemonset edit failed"; exit 1; }
+
+# Ensure all the aws-nodes are running
+kubectl get pods -n kube-system | grep aws
+while true; do
+ read -p "Do all the aws-node pods in the kube-system ns have 2/2 containers running? (yes/no): " yn
+ case $yn in
+ [Yy]* )
+ gen3_log_info "Running kube-setup-networkpolicy..."
+ gen3 kube-setup-networkpolicy || exit 1
+ break
+ ;;
+ [Nn]* )
+ gen3_log_err "Look at aws-node logs to figure out what went wrong. View this document for more details: https://docs.google.com/document/d/1fcBTciQSSwjvHktEnO_7EObY-xR_EvJ2NtgUa70wvL8"
+ gen3_log_info "Rollback instructions are also available in the above document"
+ ;;
+ * )
+ gen3_log_info "Please answer yes or no."
+ ;;
+ esac
+done
\ No newline at end of file
diff --git a/gen3/bin/mutate-guppy-config-for-guppy-test.sh b/gen3/bin/mutate-guppy-config-for-guppy-test.sh
index de7da10d5..151bb7169 100644
--- a/gen3/bin/mutate-guppy-config-for-guppy-test.sh
+++ b/gen3/bin/mutate-guppy-config-for-guppy-test.sh
@@ -16,7 +16,7 @@ sed -i 's/\(.*\)"index": "\(.*\)_etl",$/\1"index": "jenkins_subject_alias",/' or
# for bloodpac-like envs
sed -i 's/\(.*\)"index": "\(.*\)_case",$/\1"index": "jenkins_subject_alias",/' original_guppy_config.yaml
# the pre-defined Canine index works with subject ONLY (never case)
-sed -i 's/\(.*\)"type": "case"$/\1"type": "subject"/' original_guppy_config.yaml
+# sed -i 's/\(.*\)"type": "case"$/\1"type": "subject"/' original_guppy_config.yaml
sed -i 's/\(.*\)"index": "\(.*\)_file",$/\1"index": "jenkins_file_alias",/' original_guppy_config.yaml
sed -i 's/\(.*\)"config_index": "\(.*\)_array-config",$/\1"config_index": "jenkins_configs_alias",/' original_guppy_config.yaml
diff --git a/gen3/bin/reset.sh b/gen3/bin/reset.sh
index 6dac0ea16..045da4319 100644
--- a/gen3/bin/reset.sh
+++ b/gen3/bin/reset.sh
@@ -137,8 +137,12 @@ sleep 30
#
for serviceName in $(gen3 db services); do
if [[ "$serviceName" != "peregrine" ]]; then # sheepdog and peregrine share the same db
- # --force will also drop connections to the database to ensure database gets dropped
- gen3 db reset "$serviceName" --force
+    if [[ "$serviceName" != "argo" ]]; then
+ # --force will also drop connections to the database to ensure database gets dropped
+ gen3 db reset "$serviceName" --force
+ else
+ echo "Skipping the Argo DB reset, as that will delete archived workflows."
+ fi
fi
done
diff --git a/gen3/bin/sqs.sh b/gen3/bin/sqs.sh
index dccb1ff7b..7448437a0 100644
--- a/gen3/bin/sqs.sh
+++ b/gen3/bin/sqs.sh
@@ -50,15 +50,15 @@ EOM
# @sqsName
#
gen3_sqs_create_queue() {
- local sqsName=$1
- if ! shift || [[ -z "$sqsName" ]]; then
- gen3_log_err "Must provide 'sqsName' to 'gen3_sqs_create_queue'"
+ local serviceName=$1
+ if ! shift || [[ -z "$serviceName" ]]; then
+ gen3_log_err "Must provide 'serviceName' to 'gen3_sqs_create_queue'"
return 1
fi
+ local sqsName="$(gen3 api safe-name $serviceName)"
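+  # gen3 api safe-name derives a per-environment queue name from the service name, so queues from different commons do not collide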
gen3_log_info "Creating SQS '$sqsName'"
- local prefix="$(gen3 api safe-name sqs-create)"
( # subshell - do not pollute parent environment
- gen3 workon default ${prefix}__sqs 1>&2
+ gen3 workon default ${sqsName}__sqs 1>&2
gen3 cd 1>&2
cat << EOF > config.tfvars
sqs_name="$sqsName"
@@ -76,7 +76,8 @@ EOF
# @sqsName
#
gen3_sqs_create_queue_if_not_exist() {
- local sqsName=$1
+ local serviceName=$1
+ local sqsName="$(gen3 api safe-name $serviceName)"
if ! shift || [[ -z "$sqsName" ]]; then
gen3_log_err "Must provide 'sqsName' to 'gen3_sqs_create_queue'"
return 1
@@ -90,7 +91,7 @@ gen3_sqs_create_queue_if_not_exist() {
gen3_log_info "The '$sqsName' SQS already exists"
else
# create the queue
- sqsInfo="$(gen3_sqs_create_queue $sqsName)" || exit 1
+ sqsInfo="$(gen3_sqs_create_queue $serviceName)" || exit 1
sqsUrl="$(jq -e -r '.["sqs-url"].value' <<< "$sqsInfo")" || { echo "Cannot get 'sqs-url' from output: $sqsInfo"; exit 1; }
sqsArn="$(jq -e -r '.["sqs-arn"].value' <<< "$sqsInfo")" || { echo "Cannot get 'sqs-arn' from output: $sqsInfo"; exit 1; }
fi
diff --git a/gen3/bin/waf-rules-GPE-312.json b/gen3/bin/waf-rules-GPE-312.json
new file mode 100644
index 000000000..b8cdccabe
--- /dev/null
+++ b/gen3/bin/waf-rules-GPE-312.json
@@ -0,0 +1,153 @@
+[
+ {
+ "Name": "AWS-AWSManagedRulesAdminProtectionRuleSet",
+ "Priority": 0,
+ "Statement": {
+ "ManagedRuleGroupStatement": {
+ "VendorName": "AWS",
+ "Name": "AWSManagedRulesAdminProtectionRuleSet",
+ "RuleActionOverrides": [
+ {
+ "Name": "AdminProtection_URIPATH",
+ "ActionToUse": {
+ "Challenge": {}
+ }
+ }
+ ]
+ }
+ },
+ "OverrideAction": {
+ "None": {}
+ },
+ "VisibilityConfig": {
+ "SampledRequestsEnabled": true,
+ "CloudWatchMetricsEnabled": true,
+ "MetricName": "AWS-AWSManagedRulesAdminProtectionRuleSet"
+ }
+ },
+ {
+ "Name": "AWS-AWSManagedRulesAmazonIpReputationList",
+ "Priority": 1,
+ "Statement": {
+ "ManagedRuleGroupStatement": {
+ "VendorName": "AWS",
+ "Name": "AWSManagedRulesAmazonIpReputationList",
+ "RuleActionOverrides": [
+ {
+ "Name": "AWSManagedReconnaissanceList",
+ "ActionToUse": {
+ "Count": {}
+ }
+ }
+ ]
+ }
+ },
+ "OverrideAction": {
+ "None": {}
+ },
+ "VisibilityConfig": {
+ "SampledRequestsEnabled": true,
+ "CloudWatchMetricsEnabled": true,
+ "MetricName": "AWS-AWSManagedRulesAmazonIpReputationList"
+ }
+ },
+ {
+ "Name": "AWS-AWSManagedRulesCommonRuleSet",
+ "Priority": 2,
+ "Statement": {
+ "ManagedRuleGroupStatement": {
+ "VendorName": "AWS",
+ "Name": "AWSManagedRulesCommonRuleSet",
+ "Version": "Version_1.4",
+ "RuleActionOverrides": [
+ {
+ "Name": "EC2MetaDataSSRF_BODY",
+ "ActionToUse": {
+ "Count": {}
+ }
+ },
+ {
+ "Name": "GenericLFI_BODY",
+ "ActionToUse": {
+ "Allow": {}
+ }
+ },
+ {
+ "Name": "SizeRestrictions_QUERYSTRING",
+ "ActionToUse": {
+ "Count": {}
+ }
+ },
+ {
+ "Name": "SizeRestrictions_BODY",
+ "ActionToUse": {
+ "Allow": {}
+ }
+ },
+ {
+ "Name": "CrossSiteScripting_BODY",
+ "ActionToUse": {
+ "Count": {}
+ }
+ },
+ {
+ "Name": "SizeRestrictions_URIPATH",
+ "ActionToUse": {
+ "Allow": {}
+ }
+ },
+ {
+ "Name": "SizeRestrictions_Cookie_HEADER",
+ "ActionToUse": {
+ "Allow": {}
+ }
+ }
+ ]
+ }
+ },
+ "OverrideAction": {
+ "None": {}
+ },
+ "VisibilityConfig": {
+ "SampledRequestsEnabled": true,
+ "CloudWatchMetricsEnabled": true,
+ "MetricName": "AWS-AWSManagedRulesCommonRuleSet"
+ }
+ },
+ {
+ "Name": "AWS-AWSManagedRulesKnownBadInputsRuleSet",
+ "Priority": 3,
+ "Statement": {
+ "ManagedRuleGroupStatement": {
+ "VendorName": "AWS",
+ "Name": "AWSManagedRulesKnownBadInputsRuleSet"
+ }
+ },
+ "OverrideAction": {
+ "None": {}
+ },
+ "VisibilityConfig": {
+ "SampledRequestsEnabled": true,
+ "CloudWatchMetricsEnabled": true,
+ "MetricName": "AWS-AWSManagedRulesKnownBadInputsRuleSet"
+ }
+ },
+ {
+ "Name": "AWS-AWSManagedRulesLinuxRuleSet",
+ "Priority": 4,
+ "Statement": {
+ "ManagedRuleGroupStatement": {
+ "VendorName": "AWS",
+ "Name": "AWSManagedRulesLinuxRuleSet"
+ }
+ },
+ "OverrideAction": {
+ "None": {}
+ },
+ "VisibilityConfig": {
+ "SampledRequestsEnabled": true,
+ "CloudWatchMetricsEnabled": true,
+ "MetricName": "AWS-AWSManagedRulesLinuxRuleSet"
+ }
+ }
+]
\ No newline at end of file
diff --git a/gen3/bin/workon.sh b/gen3/bin/workon.sh
index e7b951d1c..f614cf662 100644
--- a/gen3/bin/workon.sh
+++ b/gen3/bin/workon.sh
@@ -113,7 +113,7 @@ if [[ ! -f "$bucketCheckFlag" && "$GEN3_FLAVOR" == "AWS" ]]; then
}
EOM
)
- gen3_aws_run aws s3api create-bucket --acl private --bucket "$GEN3_S3_BUCKET" --create-bucket-configuration ‘{“LocationConstraint”:“‘$(aws configure get $GEN3_PROFILE.region)‘“}’
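+    # us-east-1 rejects an explicit LocationConstraint, so --create-bucket-configuration is only passed for other regions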
+ gen3_aws_run aws s3api create-bucket --acl private --bucket "$GEN3_S3_BUCKET" $([[ $(aws configure get $GEN3_PROFILE.region) = "us-east-1" ]] && echo "" || echo --create-bucket-configuration LocationConstraint="$(aws configure get $GEN3_PROFILE.region)")
sleep 5 # Avoid race conditions
if gen3_aws_run aws s3api put-bucket-encryption --bucket "$GEN3_S3_BUCKET" --server-side-encryption-configuration "$S3_POLICY"; then
touch "$bucketCheckFlag"
diff --git a/gen3/lib/aws.sh b/gen3/lib/aws.sh
index 9dd6e4402..096b95753 100644
--- a/gen3/lib/aws.sh
+++ b/gen3/lib/aws.sh
@@ -535,7 +535,7 @@ customer_id = ""
# Enable/Disable Federal Information Processing Standards (FIPS) in EKS nodes. You need to have FIPS enabled AMI to enable this.
fips = false
fips_ami_kms = "arn:aws:kms:us-east-1:707767160287:key/mrk-697897f040ef45b0aa3cebf38a916f99"
-fips_enabled_ami = "ami-0de87e3680dcb13ec"
+fips_enabled_ami = "ami-074d352c8e753fc93"
# AZs where to deploy the kubernetes worker nodes.
availability_zones = ["us-east-1a", "us-east-1c", "us-east-1d"]
diff --git a/gen3/lib/logs/snapshot.sh b/gen3/lib/logs/snapshot.sh
index 31cb80283..ae769a285 100644
--- a/gen3/lib/logs/snapshot.sh
+++ b/gen3/lib/logs/snapshot.sh
@@ -36,10 +36,11 @@ gen3_logs_snapshot_container() {
# Snapshot all the pods
#
gen3_logs_snapshot_all() {
+  # For each pod that is not in the Pending or Unknown phase, list its containers
+  # (container names + initContainer names) and display them as lines of "<pod> <container>".
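+  # Example output line (hypothetical pod name): "fence-deployment-7d9f6cd9c-x2bqz fence"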
g3kubectl get pods -o json | \
- jq -r '.items | map(select(.status.phase != "Pending" and .status.phase != "Unknown")) | map( {pod: .metadata.name, containers: .spec.containers | map(.name) } ) | map( .pod as $pod | .containers | map( { pod: $pod, cont: .})[]) | map(select(.cont != "pause" and .cont != "jupyterhub"))[] | .pod + " " + .cont' | \
+ jq -r '.items | map(select(.status.phase != "Pending" and .status.phase != "Unknown")) | .[] | .metadata.name as $pod | (.spec.containers + .spec.initContainers) | map(select(.name != "pause" and .name != "jupyterhub")) | .[] | {pod: $pod, cont: .name} | "\(.pod) \(.cont)"' | \
while read -r line; do
gen3_logs_snapshot_container $line
done
}
-
diff --git a/gen3/lib/testData/default/expectedFenceResult.yaml b/gen3/lib/testData/default/expectedFenceResult.yaml
index 7bc373ad0..98c360531 100644
--- a/gen3/lib/testData/default/expectedFenceResult.yaml
+++ b/gen3/lib/testData/default/expectedFenceResult.yaml
@@ -32,7 +32,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -41,6 +41,22 @@ spec:
values:
- fence
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
automountServiceAccountToken: false
volumes:
# -----------------------------------------------------------------------------
@@ -127,6 +143,7 @@ spec:
ports:
- containerPort: 80
- containerPort: 443
+ - containerPort: 6567
volumeMounts:
# -----------------------------------------------------------------------------
# DEPRECATED! Remove when all commons are no longer using local_settings.py
@@ -191,12 +208,12 @@ spec:
mountPath: "/fence/jwt-keys.tar"
subPath: "jwt-keys.tar"
resources:
- requests:
- cpu: 0.4
- memory: 1200Mi
- limits:
- cpu: 1.0
- memory: 2400Mi
+ requests:
+ cpu: 0.4
+ memory: 1200Mi
+ limits:
+ cpu: 1.0
+ memory: 2400Mi
command: ["/bin/bash"]
args:
- "-c"
diff --git a/gen3/lib/testData/default/expectedSheepdogResult.yaml b/gen3/lib/testData/default/expectedSheepdogResult.yaml
index ea8f81dbd..a2bd3efcc 100644
--- a/gen3/lib/testData/default/expectedSheepdogResult.yaml
+++ b/gen3/lib/testData/default/expectedSheepdogResult.yaml
@@ -17,6 +17,7 @@ spec:
template:
metadata:
labels:
+ netnolimit: "yes"
app: sheepdog
release: production
public: "yes"
@@ -27,7 +28,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -36,6 +37,22 @@ spec:
values:
- sheepdog
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - spot
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - SPOT
automountServiceAccountToken: false
volumes:
- name: config-volume
@@ -148,8 +165,7 @@ spec:
imagePullPolicy: Always
resources:
requests:
- cpu: 0.8
- memory: 1024Mi
+ cpu: 100m
+ memory: 200Mi
limits:
- cpu: 2
- memory: 2048Mi
\ No newline at end of file
+ memory: 800Mi
\ No newline at end of file
diff --git a/gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml b/gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml
index 66fb41ca4..adc35ad2f 100644
--- a/gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml
+++ b/gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml
@@ -35,7 +35,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -44,6 +44,22 @@ spec:
values:
- fence
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
automountServiceAccountToken: false
volumes:
# -----------------------------------------------------------------------------
diff --git a/gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml b/gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml
index 5d0025950..08407ae52 100644
--- a/gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml
+++ b/gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml
@@ -31,7 +31,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -40,6 +40,22 @@ spec:
values:
- sheepdog
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - spot
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - SPOT
automountServiceAccountToken: false
volumes:
- name: config-volume
@@ -182,8 +198,7 @@ spec:
imagePullPolicy: Always
resources:
requests:
- cpu: 0.8
- memory: 1024Mi
+ cpu: 100m
+ memory: 200Mi
limits:
- cpu: 2
- memory: 2048Mi
+ memory: 800Mi
diff --git a/gen3/test/ec2Test.sh b/gen3/test/ec2Test.sh
index 21310a24c..4981c925c 100644
--- a/gen3/test/ec2Test.sh
+++ b/gen3/test/ec2Test.sh
@@ -1,6 +1,6 @@
-if ! EC2_TEST_IP="$(g3kubectl get nodes -o json | jq -r -e '.items[0].status.addresses[] | select(.type == "InternalIP") | .address')" || [[ -z "$EC2_TEST_IP" ]]; then
+if ! EC2_TEST_IP="$(g3kubectl get nodes -o json | jq -r -e '.items[3].status.addresses[] | select(.type == "InternalIP") | .address')" || [[ -z "$EC2_TEST_IP" ]]; then
gen3_log_err "ec2Test failed to acquire IP address of a k8s node to test against"
fi
diff --git a/gen3/test/ecrTest.sh b/gen3/test/ecrTest.sh
index 91edf798b..57847abe5 100644
--- a/gen3/test/ecrTest.sh
+++ b/gen3/test/ecrTest.sh
@@ -10,8 +10,8 @@ test_ecr_login() {
test_ecr_setup() {
if [[ -n "$JENKINS_HOME" ]]; then
- # give ourselves read/write permissions on /var/run/docker.sock
- sudo chmod a+rw /var/run/docker.sock; because $? "ecr_setup modified docker.sock"
+ # give ourselves permissions on /run/containerd/containerd.sock
+ sudo chown root:sudo /run/containerd/containerd.sock; because $? "ecr_setup modified containerd.sock"
fi
}
diff --git a/gen3/test/jobTest.sh b/gen3/test/jobTest.sh
index 84a4d046b..bb37b4f72 100644
--- a/gen3/test/jobTest.sh
+++ b/gen3/test/jobTest.sh
@@ -6,7 +6,7 @@
excludeJob() {
local jobKey="$1"
local excludeList=(
- /aws-bucket- /bucket- /covid19- /data-ingestion- /google- /nb-etl- /remove-objects-from- /replicate- /s3sync- /fence-cleanup
+ /aws-bucket- /bucket- /covid19- /data-ingestion- /google- /nb-etl- /remove-objects-from- /replicate- /s3sync- /fence-cleanup /etl- /indexd- /metadata-
)
for exclude in "${excludeList[@]}"; do
if [[ "$it" =~ $exclude ]]; then return 0; fi
diff --git a/gen3/test/jupyterTest.sh b/gen3/test/jupyterTest.sh
index f0e327d71..db6a62618 100644
--- a/gen3/test/jupyterTest.sh
+++ b/gen3/test/jupyterTest.sh
@@ -30,7 +30,7 @@ test_jupyter_metrics() {
}
shunit_runtest "test_jupyter_idle" "jupyter"
-shunit_runtest "test_jupyter_metrics" "jupyter"
+# shunit_runtest "test_jupyter_metrics" "jupyter"
shunit_runtest "test_jupyter_prepuller" "local,jupyter"
shunit_runtest "test_jupyter_namespace" "local,jupyter"
shunit_runtest "test_jupyter_setup" "jupyter"
diff --git a/gen3/test/terraformTest.sh b/gen3/test/terraformTest.sh
deleted file mode 100644
index 17bcc03c2..000000000
--- a/gen3/test/terraformTest.sh
+++ /dev/null
@@ -1,461 +0,0 @@
-GEN3_TEST_PROFILE="${GEN3_TEST_PROFILE:-cdistest}"
-GEN3_TEST_WORKSPACE="gen3test"
-GEN3_TEST_ACCOUNT=707767160287
-
-#
-# TODO - generalize these tests to setup their own test VPC,
-# rather than relying on qaplanetv1 or devplanetv1 being there
-#
-
-#
-# Little macos/linux stat wrapper
-#
-file_mode() {
- if [[ $(uname -s) == 'Linux' ]]; then
- stat -c %a "$1"
- else
- stat -f %p "$1"
- fi
-}
-
-test_workspace() {
- gen3 workon $GEN3_TEST_PROFILE $GEN3_TEST_WORKSPACE; because $? "Calling gen3 workon multiple times should be harmless"
- [[ $GEN3_PROFILE = $GEN3_TEST_PROFILE ]]; because $? "gen3 workon sets the GEN3_PROFILE env variable: $GEN3_PROFILE"
- [[ $GEN3_WORKSPACE = $GEN3_TEST_WORKSPACE ]]; because $? "gen3 workon sets the GEN3_WORKSPACE env variable: $GEN3_WORKSPACE"
- [[ $GEN3_FLAVOR = "AWS" || \
- ($GEN3_FLAVOR == "GCP" && $GEN3_PROFILE =~ ^gcp-) || \
- ($GEN3_FLAVOR == "ONPREM" && $GEN3_PROFILE =~ ^onprem-) ]]; because $? "GEN3_FLAVOR is gcp for gcp-* profiles, else AWS"
- [[ $GEN3_FLAVOR != "AWS" || $GEN3_S3_BUCKET = "cdis-state-ac${GEN3_TEST_ACCOUNT}-gen3" || $GEN3_S3_BUCKET = "cdis-terraform-state.account-${GEN3_TEST_ACCOUNT}.gen3" ]]; because $? "gen3 workon sets the GEN3_S3_BUCKET env variable: $GEN3_S3_BUCKET"
- [[ (! -z $GEN3_WORKDIR) && -d $GEN3_WORKDIR ]]; because $? "gen3 workon sets the GEN3_WORKDIR env variable, and initializes the folder: $GEN3_WORKDIR"
- [[ $(file_mode $GEN3_WORKDIR) =~ 700$ ]]; because $? "gen3 workon sets the GEN3_WORKDIR to mode 0700, because secrets are in there"
- gen3 cd && [[ $(pwd) = "$GEN3_WORKDIR" ]]; because $? "gen3 cd should take us to the workspace by default: $(pwd) =? $GEN3_WORKDIR"
- for fileName in README.md config.tfvars backend.tfvars; do
- [[ -f $fileName ]]; because $? "gen3 workon ensures we have a $fileName - local copy || s3 copy || generated from template"
- done
- [[ ! -z "$MD5" ]]; because $? "commons.sh sets MD5 to $MD5"
-
- if [[ $GEN3_TEST_WORKSPACE =~ __custom$ ]]; then
- [[ "$GEN3_TFSCRIPT_FOLDER" == "$GEN3_WORKDIR" ]]; because $? "a __custom workspace loads from the workspace folder"
- elif [[ "$GEN3_TEST_PROFILE" =~ ^gcp- ]]; then
- [[ "$GEN3_TFSCRIPT_FOLDER" == "$GEN3_HOME/tf_files/gcp/commons" ]]; because $? "a gcp- profile currently only support a commons workspace"
- elif [[ "$GEN3_TEST_PROFILE" =~ ^onprem- ]]; then
- for fileName in README.md creds.json 00configmap.yaml kube-setup.sh; do
- filePath="onprem_scripts/$fileName"
- [[ -f $filePath ]]; because $? "gen3 workon ensures we have a $filePath generated from template"
- done
- else # aws profile
- [[ "$GEN3_TFSCRIPT_FOLDER" =~ ^"$GEN3_HOME/tf_files/aws/" ]]; because $? "an aws workspace references the aws/ folder: $GEN3_TFSCRIPT_FOLDER"
- fi
-}
-
-workspace_cleanup() {
- # try to avoid accidentally erasing the user's data ...
- cd /tmp && [[ -n "$GEN3_WORKDIR" && "$GEN3_WORKDIR" =~ /gen3/ && -f "$GEN3_WORKDIR/config.tfvars" ]] && /bin/rm -rf "$GEN3_WORKDIR";
- because $? "was able to cleanup $GEN3_WORKDIR"
-}
-
-test_uservpc_workspace() {
- GEN3_TEST_WORKSPACE="${GEN3_TEST_WORKSPACE}_user"
- test_workspace
- [[ "$GEN3_TFSCRIPT_FOLDER" == "$GEN3_HOME/tf_files/aws/user_vpc" ]]; because $? "a _user workspace should use the ./aws/user_vpc resources: $GEN3_TFSCRIPT_FOLDER"
- workspace_cleanup
-}
-
-test_usergeneric_workspace() {
- GEN3_TEST_WORKSPACE="${GEN3_TEST_WORKSPACE}_usergeneric"
- test_workspace
- [[ "$GEN3_TFSCRIPT_FOLDER" == "$GEN3_HOME/tf_files/aws/user_generic" ]]; because $? "a _usergeneric workspace should use the ./aws/user_generic resources: $GEN3_TFSCRIPT_FOLDER"
- cat << EOF > config.tfvars
-username="frickjack"
-EOF
- gen3 tfplan; because $? "_usergeneric tfplan should work";
- workspace_cleanup
-}
-
-test_snapshot_workspace() {
- GEN3_TEST_WORKSPACE="${GEN3_TEST_WORKSPACE}_snapshot"
- test_workspace
- [[ "$GEN3_TFSCRIPT_FOLDER" == "$GEN3_HOME/tf_files/aws/rds_snapshot" ]]; because $? "a _snapshot workspace should use the ./aws/rds_snapshot resources: $GEN3_TFSCRIPT_FOLDER"
- workspace_cleanup
-}
-
-test_databucket_workspace() {
- GEN3_TEST_WORKSPACE="${GEN3_TEST_WORKSPACE}_databucket"
- test_workspace
- [[ "$GEN3_TFSCRIPT_FOLDER" == "$GEN3_HOME/tf_files/aws/data_bucket" ]]; because $? "a _databucket workspace should use the ./aws/data_bucket resources: $GEN3_TFSCRIPT_FOLDER"
-  cat - > config.tfvars <<EOM
-db_password_fence="whatever"
-
-db_password_gdcapi="whatever"
-db_password_sheepdog="whatever"
-db_password_peregrine="whatever"
-
-db_password_indexd="g6pmYkcoR7qECjGoErzVb5gkX3kum0yo"
-
-# password for write access to indexd
-gdcapi_indexd_password="oYva39mIPV5uXskv7jWnKuVZBUFBQcxd"
-
-fence_snapshot=""
-gdcapi_snapshot=""
-indexd_snapshot=""
-# mailgun for sending alert e-mails
-mailgun_api_key=""
-mailgun_api_url=""
-mailgun_smtp_host=""
-
-kube_ssh_key=""
-EOM
- [[ "$(pwd)" =~ "/$GEN3_WORKSPACE"$ ]]; because $? "commons workspace should have base $GEN3_WORKSPACE - $(pwd)"
- gen3 tfplan; because $? "tfplan should run even with some invalid config variables"
- [[ -f "$GEN3_WORKDIR/plan.terraform" ]]; because $? "'gen3 tfplan' generates a plan.terraform file used by 'gen3 tfapply'"
- workspace_cleanup
-}
-
-test_custom_workspace() {
- GEN3_TEST_WORKSPACE="${GEN3_TEST_WORKSPACE}__custom"
- test_workspace
-
- local sourceFolder="../../../../../cloud-automation/tf_files/aws/modules/s3-bucket"
- if [[ ! -d "$sourceFolder" ]]; then
- # Jenkins has a different relative path setup
- sourceFolder="../../../../cloud-automation/tf_files/aws/modules/s3-bucket"
- fi
-  cat - > bucket.tf <<EOM
+              >> /home/ec2-user/.ssh/authorized_keys
+
+ echo "$(jq '.registryPullQPS=0' /etc/kubernetes/kubelet/kubelet-config.json)" > /etc/kubernetes/kubelet/kubelet-config.json
+
+ sysctl -w fs.inotify.max_user_watches=12000
+
+ sudo yum update -y
+ sudo yum install -y dracut-fips openssl >> /opt/fips-install.log
+ sudo dracut -f
+ # configure grub
+ sudo /sbin/grubby --update-kernel=ALL --args="fips=1"
+
+ --BOUNDARY
+ Content-Type: text/cloud-config; charset="us-ascii"
+
+ power_state:
+ delay: now
+ mode: reboot
+ message: Powering off
+ timeout: 2
+ condition: true
+
+ --BOUNDARY--
+ blockDeviceMappings:
+ - deviceName: /dev/xvda
+ ebs:
+ volumeSize: 100Gi
+ volumeType: gp2
+ encrypted: true
+ deleteOnTermination: true
diff --git a/kube/services/argo-events/workflows/eventsource-completed.yaml b/kube/services/argo-events/workflows/eventsource-completed.yaml
new file mode 100644
index 000000000..b3c7488fa
--- /dev/null
+++ b/kube/services/argo-events/workflows/eventsource-completed.yaml
@@ -0,0 +1,20 @@
+apiVersion: argoproj.io/v1alpha1
+kind: EventSource
+metadata:
+ name: argo-workflow-ended-source
+ namespace: argo-events
+spec:
+ template:
+ serviceAccountName: default
+ resource:
+ workflow-ended:
+ namespace: argo
+ group: argoproj.io
+ version: v1alpha1
+ resource: workflows
+ eventTypes:
+ - UPDATE
+ filter:
+ labels:
+ - key: workflows.argoproj.io/completed
+ value: "true"
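+      # Argo sets workflows.argoproj.io/completed to "true" when a workflow finishes, so this source only fires for completed workflows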
diff --git a/kube/services/argo-events/workflows/eventsource-created.yaml b/kube/services/argo-events/workflows/eventsource-created.yaml
new file mode 100644
index 000000000..11d7084ca
--- /dev/null
+++ b/kube/services/argo-events/workflows/eventsource-created.yaml
@@ -0,0 +1,18 @@
+apiVersion: argoproj.io/v1alpha1
+kind: EventSource
+metadata:
+ name: argo-workflow-created-source
+ namespace: argo-events
+spec:
+ template:
+ serviceAccountName: default
+ resource:
+ workflow-created:
+ namespace: argo
+ group: argoproj.io
+ version: v1alpha1
+ resource: workflows
+ eventTypes:
+ - ADD
+ filter:
+ afterStart: true
diff --git a/kube/services/argo-events/workflows/eventsource-deleted.yaml b/kube/services/argo-events/workflows/eventsource-deleted.yaml
new file mode 100644
index 000000000..54a00464e
--- /dev/null
+++ b/kube/services/argo-events/workflows/eventsource-deleted.yaml
@@ -0,0 +1,16 @@
+apiVersion: argoproj.io/v1alpha1
+kind: EventSource
+metadata:
+ name: argo-workflow-deleted-source
+ namespace: argo-events
+spec:
+ template:
+ serviceAccountName: default
+ resource:
+ workflow-deleted:
+ namespace: argo
+ group: argoproj.io
+ version: v1alpha1
+ resource: workflows
+ eventTypes:
+ - DELETE
diff --git a/kube/services/argo-events/workflows/job-admin-role.yaml b/kube/services/argo-events/workflows/job-admin-role.yaml
new file mode 100644
index 000000000..462652c97
--- /dev/null
+++ b/kube/services/argo-events/workflows/job-admin-role.yaml
@@ -0,0 +1,12 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: job-admin
+ namespace: argo-events
+rules:
+ - apiGroups:
+ - batch
+ resources:
+ - jobs
+ verbs:
+ - '*'
diff --git a/kube/services/argo-events/workflows/sensor-completed.yaml b/kube/services/argo-events/workflows/sensor-completed.yaml
new file mode 100644
index 000000000..293c0e119
--- /dev/null
+++ b/kube/services/argo-events/workflows/sensor-completed.yaml
@@ -0,0 +1,64 @@
+apiVersion: argoproj.io/v1alpha1
+kind: Sensor
+metadata:
+ name: argo-workflow-ended-sensor
+ namespace: argo-events
+spec:
+ template:
+ serviceAccountName: default
+ container:
+ env:
+ - name: DEBUG_LOG
+ value: "true"
+ dependencies:
+ - name: argo-workflow-ended
+ eventSourceName: argo-workflow-ended-source
+ eventName: workflow-ended
+ triggers:
+ - template:
+ name: log-event
+ log:
+ intervalSeconds: 10
+ - template:
+ name: argo-workflow
+ k8s:
+ operation: create
+ parameters:
+ - src:
+ dependencyName: argo-workflow-ended
+ dataKey: body.metadata.name
+ dest: spec.template.spec.containers.0.env.0.value
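+ # Injects the finished workflow's name into the Job's first env var (WORKFLOW_NAME) below.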
+ source:
+ resource:
+ apiVersion: batch/v1
+ kind: Job
+ metadata:
+ generateName: delete-karpenter-resources-
+ namespace: argo-events
+ labels:
+ workflow: ""
+ spec:
+ ttlSecondsAfterFinished: 900
+ completions: 1
+ parallelism: 1
+ template:
+ spec:
+ restartPolicy: OnFailure
+ containers:
+ - name: karpenter-resource-creator
+ image: quay.io/cdis/awshelper
+ command: ["/bin/sh"]
+ args:
+ - "-c"
+ - |
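+ # Remove the per-workflow Karpenter resources created when the workflow started.
+ # Example (hypothetical name): WORKFLOW_NAME=wf-abc12 deletes the "workflow-wf-abc12" AWSNodeTemplate and Provisioner.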
+ if kubectl get awsnodetemplate workflow-$WORKFLOW_NAME >/dev/null 2>&1; then
+ kubectl delete awsnodetemplate workflow-$WORKFLOW_NAME
+ fi
+
+ if kubectl get provisioner workflow-$WORKFLOW_NAME >/dev/null 2>&1; then
+ kubectl delete provisioner workflow-$WORKFLOW_NAME
+ fi
+ env:
+ - name: WORKFLOW_NAME
+ value: ""
+ backoffLimit: 20
diff --git a/kube/services/argo-events/workflows/sensor-created.yaml b/kube/services/argo-events/workflows/sensor-created.yaml
new file mode 100644
index 000000000..9f6de2c83
--- /dev/null
+++ b/kube/services/argo-events/workflows/sensor-created.yaml
@@ -0,0 +1,100 @@
+apiVersion: argoproj.io/v1alpha1
+kind: Sensor
+metadata:
+ name: argo-workflow-created-sensor
+ namespace: argo-events
+spec:
+ template:
+ serviceAccountName: default
+ container:
+ env:
+ - name: DEBUG_LOG
+ value: "true"
+ dependencies:
+ - name: workflow-created-event
+ eventSourceName: argo-workflow-created-source
+ eventName: workflow-created
+ triggers:
+ - template:
+ name: log-event
+ log:
+ intervalSeconds: 10
+ - template:
+ name: argo-workflow
+ k8s:
+ operation: create
+ parameters:
+ - src:
+ dependencyName: workflow-created-event
+ dataKey: body.metadata.name
+ dest: spec.template.spec.containers.0.env.0.value
+ - src:
+ dependencyName: workflow-created-event
+ dataKey: body.metadata.name
+ dest: metadata.labels.workflow
+ - src:
+ dependencyName: workflow-created-event
+ dataKey: body.metadata.labels.gen3username
+ dest: spec.template.spec.containers.0.env.1.value
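+ # The parameters above inject the new workflow's name into WORKFLOW_NAME and the Job's "workflow" label, and the gen3username label into GEN3_USERNAME.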
+ source:
+ resource:
+ apiVersion: batch/v1
+ kind: Job
+ metadata:
+ generateName: create-karpenter-resources-
+ namespace: argo-events
+ labels:
+ workflow: ""
+ spec:
+ completions: 1
+ ttlSecondsAfterFinished: 900
+ parallelism: 1
+ template:
+ spec:
+ restartPolicy: OnFailure
+ containers:
+ - name: karpenter-resource-creator
+ image: quay.io/cdis/awshelper
+ command: ["/bin/sh"]
+ args:
+ - "-c"
+ - |
+ #!/bin/bash
+ if [ -z "$PROVISIONER_TEMPLATE" ]; then
+ PROVISIONER_TEMPLATE="provisioner.yaml"
+ fi
+
+ if [ -z "$AWSNODETEMPLATE_TEMPLATE" ]; then
+ AWSNODETEMPLATE_TEMPLATE="nodetemplate.yaml"
+ fi
+
+
+ if ! kubectl get awsnodetemplate workflow-$WORKFLOW_NAME >/dev/null 2>&1; then
+ sed -e "s/WORKFLOW_NAME/$WORKFLOW_NAME/" -e "s/GEN3_USERNAME/$GEN3_USERNAME/" -e "s/ENVIRONMENT/$ENVIRONMENT/" "$AWSNODETEMPLATE_TEMPLATE" | kubectl apply -f -
+ fi
+
+ if ! kubectl get provisioner workflow-$WORKFLOW_NAME >/dev/null 2>&1; then
+ sed -e "s/WORKFLOW_NAME/$WORKFLOW_NAME/" -e "s/GEN3_USERNAME/$GEN3_USERNAME/" -e "s/ENVIRONMENT/$ENVIRONMENT/" "$PROVISIONER_TEMPLATE" | kubectl apply -f -
+ fi
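+ # Example (hypothetical values): for WORKFLOW_NAME=wf-abc12 this renders and applies an AWSNodeTemplate and Provisioner named "workflow-wf-abc12" from the mounted templates.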
+ env:
+ - name: WORKFLOW_NAME
+ value: ""
+ - name: GEN3_USERNAME
+ value: ""
+ - name: ENVIRONMENT
+ valueFrom:
+ configMapKeyRef:
+ name: environment
+ key: environment
+ - name: PROVISIONER_TEMPLATE
+ value: /manifests/provisioner.yaml
+ - name: AWSNODETEMPLATE_TEMPLATE
+ value: /manifests/nodetemplate.yaml
+ volumeMounts:
+ - name: karpenter-templates-volume
+ mountPath: /manifests
+ volumes:
+ - name: karpenter-templates-volume
+ configMap:
+ name: karpenter-templates
+ backoffLimit: 20
diff --git a/kube/services/argo-events/workflows/sensor-deleted.yaml b/kube/services/argo-events/workflows/sensor-deleted.yaml
new file mode 100644
index 000000000..c235a820a
--- /dev/null
+++ b/kube/services/argo-events/workflows/sensor-deleted.yaml
@@ -0,0 +1,60 @@
+apiVersion: argoproj.io/v1alpha1
+kind: Sensor
+metadata:
+ name: argo-workflow-deleted-sensor
+ namespace: argo-events
+spec:
+ template:
+ serviceAccountName: default
+ dependencies:
+ - name: argo-workflow-deleted
+ eventSourceName: argo-workflow-deleted-source
+ eventName: workflow-deleted
+ triggers:
+ - template:
+ name: log-event
+ log:
+ intervalSeconds: 10
+ - template:
+ name: argo-workflow
+ k8s:
+ operation: create
+ parameters:
+ - src:
+ dependencyName: argo-workflow-deleted
+ dataKey: body.metadata.name
+ dest: spec.template.spec.containers.0.env.0.value
+ source:
+ resource:
+ apiVersion: batch/v1
+ kind: Job
+ metadata:
+ generateName: delete-karpenter-resources-
+ namespace: argo-events
+ labels:
+ workflow: ""
+ spec:
+ ttlSecondsAfterFinished: 900
+ completions: 1
+ parallelism: 1
+ template:
+ spec:
+ restartPolicy: OnFailure
+ containers:
+ - name: karpenter-resource-creator
+ image: quay.io/cdis/awshelper
+ command: ["/bin/sh"]
+ args:
+ - "-c"
+ - |
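+ # Same cleanup as the "completed" sensor: drop the per-workflow Karpenter resources.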
+ if kubectl get awsnodetemplate workflow-$WORKFLOW_NAME >/dev/null 2>&1; then
+ kubectl delete awsnodetemplate workflow-$WORKFLOW_NAME
+ fi
+
+ if kubectl get provisioner workflow-$WORKFLOW_NAME >/dev/null 2>&1; then
+ kubectl delete provisioner workflow-$WORKFLOW_NAME
+ fi
+ env:
+ - name: WORKFLOW_NAME
+ value: ""
+ backoffLimit: 20
diff --git a/kube/services/argo-wrapper/argo-wrapper-deploy.yaml b/kube/services/argo-wrapper/argo-wrapper-deploy.yaml
index cbd734739..89ec29ecc 100644
--- a/kube/services/argo-wrapper/argo-wrapper-deploy.yaml
+++ b/kube/services/argo-wrapper/argo-wrapper-deploy.yaml
@@ -28,7 +28,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -37,11 +37,31 @@ spec:
values:
- argo-wrapper
topologyKey: "kubernetes.io/hostname"
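+ # Prefer on-demand capacity, matching both the Karpenter and the EKS managed node group capacity-type labels.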
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
volumes:
- name: argo-config
configMap:
name: manifest-argo
-
+ optional: true
+ - name: argo-wrapper-namespace-config
+ configMap:
+ name: argo-wrapper-namespace-config
+
containers:
- name: argo-wrapper
GEN3_ARGO-WRAPPER_IMAGE
@@ -53,3 +73,7 @@ spec:
readOnly: true
mountPath: /argo.json
subPath: argo.json
+ - name: argo-wrapper-namespace-config
+ readOnly: true
+ mountPath: /argowrapper/config.ini
+ subPath: config.ini
diff --git a/kube/services/argo-wrapper/config.ini b/kube/services/argo-wrapper/config.ini
new file mode 100644
index 000000000..0693ee2e2
--- /dev/null
+++ b/kube/services/argo-wrapper/config.ini
@@ -0,0 +1,6 @@
+[DEFAULT]
+ARGO_ACCESS_METHOD = access
+ARGO_HOST = $ARGO_HOST
+ARGO_NAMESPACE = $ARGO_NAMESPACE
+COHORT_DEFINITION_BY_SOURCE_AND_TEAM_PROJECT_URL = http://cohort-middleware-service/cohortdefinition-stats/by-source-id/{}/by-team-project?team-project={}
+COHORT_MIDDLEWARE_URL = http://cohort-middleware-service
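+# The $ARGO_HOST and $ARGO_NAMESPACE placeholders are presumably substituted by the deployment tooling before this file is loaded into the argo-wrapper-namespace-config configmap.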
diff --git a/kube/services/argo/values.yaml b/kube/services/argo/values.yaml
index bf407d651..23dda4a5a 100644
--- a/kube/services/argo/values.yaml
+++ b/kube/services/argo/values.yaml
@@ -1,11 +1,46 @@
controller:
parallelism: 10
+ namespaceParallelism: 5
metricsConfig:
# -- Enables prometheus metrics server
- enabled: false
+ enabled: true
+ servicePort: 9090
+
+ resources:
+ requests:
+ memory: 8Gi
+ limits:
+ memory: 8Gi
+
+ podAnnotations:
+ prometheus.io/scrape: "true"
+ prometheus.io/path: /metrics
+ prometheus.io/port: "9090"
+
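+ # Equivalent Datadog autodiscovery check pointed at the same metrics endpoint.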
+ ad.datadoghq.com/controller.checks: |
+ {
+ "openmetrics": {
+ "init_config": {},
+ "instances": [
+ {
+ "openmetrics_endpoint": "http://%%host%%:%%port%%/metrics ",
+ "namespace": "argo",
+ "metrics": ["*"]
+ }
+ ]
+ }
+ }
+
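+ # limit/burst below are assumed to map to the workflow-controller's resourceRateLimit, throttling its Kubernetes resource updates.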
+ resourceRateLimit:
+ limit: 40
+ burst: 4
# -- enable persistence using postgres
persistence:
+ connectionPool:
+ maxIdleConns: 100
+ maxOpenConns: 0
+ connMaxLifetime: 300s
archive: true
archiveLabelSelector:
matchLabels:
@@ -15,15 +50,16 @@ controller:
port: 5432
database: GEN3_ARGO_DB_NAME
tableName: argo_workflows
- # # the database secrets must be in the same namespace of the controller
+ # # the database secrets must be in the same namespace as the controller
userNameSecret:
name: argo-db-creds
key: db_username
passwordSecret:
name: argo-db-creds
key: db_password
+ nodeStatusOffLoad: true
- workflowDefaults:
+ workflowDefaults:
spec:
archiveLogs: true
@@ -42,11 +78,16 @@ server:
baseHref: "/argo/"
# -- Extra arguments to provide to the Argo server binary, such as for disabling authentication.
extraArgs:
- - --auth-mode=server
- - --auth-mode=client
+ - --auth-mode=server
+ - --auth-mode=client
extraEnv:
- - name: ARGO_HTTP1
- value: "true"
+ - name: ARGO_HTTP1
+ value: "true"
+ resources:
+ requests:
+ memory: 8Gi
+ limits:
+ memory: 8Gi
# -- Influences the creation of the ConfigMap for the workflow-controller itself.
useDefaultArtifactRepo: true
diff --git a/kube/services/argo/workflows/fence-usersync-cron.yaml b/kube/services/argo/workflows/fence-usersync-cron.yaml
new file mode 100644
index 000000000..4723ce10f
--- /dev/null
+++ b/kube/services/argo/workflows/fence-usersync-cron.yaml
@@ -0,0 +1,10 @@
+apiVersion: argoproj.io/v1alpha1
+kind: CronWorkflow
+metadata:
+ name: fence-usersync-cron
+spec:
+ serviceAccountName: argo
+ schedule: "*/30 * * * *"
+ workflowSpec:
+ workflowTemplateRef:
+ name: fence-usersync-workflow
diff --git a/kube/services/argo/workflows/fence-usersync-wf.yaml b/kube/services/argo/workflows/fence-usersync-wf.yaml
new file mode 100644
index 000000000..d7f56a2ce
--- /dev/null
+++ b/kube/services/argo/workflows/fence-usersync-wf.yaml
@@ -0,0 +1,257 @@
+apiVersion: argoproj.io/v1alpha1
+kind: WorkflowTemplate
+metadata:
+ name: fence-usersync-workflow
+spec:
+ volumeClaimTemplates:
+ - metadata:
+ name: shared-data
+ spec:
+ accessModes: [ "ReadWriteOnce" ]
+ resources:
+ requests:
+ storage: 1Gi
+ serviceAccountName: argo
+ entrypoint: fence-usersync
+ arguments:
+ parameters:
+ - name: ADD_DBGAP
+ value: "false"
+ - name: ONLY_DBGAP
+ value: "false"
+ templates:
+ - name: fence-usersync
+ steps:
+ - - name: wait-for-fence
+ template: wait-for-fence
+ - - name: awshelper
+ template: awshelper
+ - - name: usersyncer
+ template: usersyncer
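+ # The steps above run sequentially: wait for fence to respond, fetch user.yaml with awshelper, then run the usersync.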
+
+ - name: wait-for-fence
+ container:
+ image: curlimages/curl:latest
+ command: ["/bin/sh","-c"]
+ args: ["while [ $(curl -sw '%{http_code}' http://fence-service -o /dev/null) -ne 200 ]; do sleep 5; echo 'Waiting for fence...'; done"]
+
+ - name: awshelper
+ container:
+ image: quay.io/cdis/awshelper:master
+ imagePullPolicy: Always
+ securityContext:
+ runAsUser: 0
+ env:
+ - name: gen3Env
+ valueFrom:
+ configMapKeyRef:
+ name: global
+ key: hostname
+ - name: userYamlS3Path
+ valueFrom:
+ configMapKeyRef:
+ name: manifest-global
+ key: useryaml_s3path
+ - name: slackWebHook
+ value: None
+ volumeMounts:
+ - name: shared-data
+ mountPath: /mnt/shared
+ command: ["/bin/bash"]
+ args:
+ - "-c"
+ - |
+ GEN3_HOME=/home/ubuntu/cloud-automation
+ source "${GEN3_HOME}/gen3/lib/utils.sh"
+ gen3_load "gen3/gen3setup"
+
+ if [ "${userYamlS3Path}" = 'none' ]; then
+ # echo "using local user.yaml"
+ # cp /var/www/fence/user.yaml /mnt/shared/user.yaml
+ echo "s3 yaml not provided - bailing out"
+ exit 1
+ else
+ # -----------------
+ echo "awshelper downloading ${userYamlS3Path} to /mnt/shared/user.yaml"
+ n=0
+ until [ $n -ge 5 ]; do
+ echo "Download attempt $n"
+ aws s3 cp "${userYamlS3Path}" /mnt/shared/user.yaml && break
+ n=$((n+1))
+ sleep 2
+ done
+ fi
+ if [[ ! -f /mnt/shared/user.yaml ]]; then
+ echo "awshelper failed to retrieve /mnt/shared/user.yaml"
+ exit 1
+ fi
+ #-----------
+ echo "awshelper updating etl configmap"
+ if ! gen3 gitops etl-convert < /mnt/shared/user.yaml > /tmp/user.yaml; then
+ echo "ERROR: failed to generate ETL config"
+ exit 1
+ fi
+ # kubectl delete configmap fence > /dev/null 2>&1
+ # kubectl create configmap fence --from-file=/tmp/user.yaml
+ if [ "${slackWebHook}" != 'None' ]; then
+ curl -X POST --data-urlencode "payload={\"text\": \"AWSHelper: Syncing users on ${gen3Env}\"}" "${slackWebHook}"
+ fi
+ echo "Helper exit ok"
+
+ - name: usersyncer
+ volumes:
+ - name: yaml-merge
+ configMap:
+ name: "fence-yaml-merge"
+ - name: config-volume
+ secret:
+ secretName: "fence-config"
+ - name: creds-volume
+ secret:
+ secretName: "fence-creds"
+ - name: fence-google-app-creds-secret-volume
+ secret:
+ secretName: "fence-google-app-creds-secret"
+ - name: fence-google-storage-creds-secret-volume
+ secret:
+ secretName: "fence-google-storage-creds-secret"
+ - name: fence-ssh-keys
+ secret:
+ secretName: "fence-ssh-keys"
+ defaultMode: 0400
+ - name: fence-sshconfig
+ configMap:
+ name: "fence-sshconfig"
+ - name: projects
+ configMap:
+ name: "projects"
+ container:
+ image: quay.io/cdis/fence:master
+ imagePullPolicy: Always
+ env:
+ - name: PYTHONPATH
+ value: /var/www/fence
+ - name: SYNC_FROM_DBGAP
+ valueFrom:
+ configMapKeyRef:
+ name: manifest-global
+ key: sync_from_dbgap
+ - name: ADD_DBGAP
+ value: "{{workflow.parameters.ADD_DBGAP}}"
+ - name: ONLY_DBGAP
+ value: "{{workflow.parameters.ONLY_DBGAP}}"
+ - name: SLACK_SEND_DBGAP
+ valueFrom:
+ configMapKeyRef:
+ name: manifest-global
+ key: slack_send_dbgap
+ optional: true
+ - name: slackWebHook
+ valueFrom:
+ configMapKeyRef:
+ name: global
+ key: slack_webhook
+ optional: true
+ - name: gen3Env
+ valueFrom:
+ configMapKeyRef:
+ name: global
+ key: hostname
+ - name: FENCE_PUBLIC_CONFIG
+ valueFrom:
+ configMapKeyRef:
+ name: manifest-fence
+ key: fence-config-public.yaml
+ optional: true
+ volumeMounts:
+ - name: shared-data
+ mountPath: /mnt/shared
+ - name: "config-volume"
+ readOnly: true
+ mountPath: "/var/www/fence/fence-config.yaml"
+ subPath: fence-config.yaml
+ - name: "creds-volume"
+ readOnly: true
+ mountPath: "/var/www/fence/creds.json"
+ - name: "yaml-merge"
+ readOnly: true
+ mountPath: "/var/www/fence/yaml_merge.py"
+ - name: "fence-google-app-creds-secret-volume"
+ readOnly: true
+ mountPath: "/var/www/fence/fence_google_app_creds_secret.json"
+ subPath: fence_google_app_creds_secret.json
+ - name: "fence-google-storage-creds-secret-volume"
+ readOnly: true
+ mountPath: "/var/www/fence/fence_google_storage_creds_secret.json"
+ subPath: fence_google_storage_creds_secret.json
+ - name: "fence-ssh-keys"
+ mountPath: "/root/.ssh/id_rsa"
+ subPath: "id_rsa"
+ - name: "fence-ssh-keys"
+ mountPath: "/root/.ssh/id_rsa.pub"
+ subPath: "id_rsa.pub"
+ - name: "fence-sshconfig"
+ mountPath: "/root/.ssh/config"
+ subPath: "config"
+ - name: "projects"
+ mountPath: "/var/www/fence/projects.yaml"
+ subPath: "projects.yaml"
+ command: ["/bin/bash"]
+ args:
+ - "-c"
+ # The script below exits with the exit code captured from fence-create ("exit $exitcode" at the end)
+ - |
+ echo "${ADD_DBGAP}"
+ echo "${ONLY_DBGAP}"
+ echo "${FENCE_PUBLIC_CONFIG:-""}" > "/var/www/fence/fence-config-public.yaml"
+ python /var/www/fence/yaml_merge.py /var/www/fence/fence-config-public.yaml /var/www/fence/fence-config-secret.yaml > /var/www/fence/fence-config.yaml
+ echo 'options use-vc' >> /etc/resolv.conf
+ let count=0
+ while [[ ! -f /mnt/shared/user.yaml && $count -lt 50 ]]; do
+ echo "fence container waiting for /mnt/shared/user.yaml";
+ sleep 2
+ let count=$count+1
+ done
+ if [[ "$SYNC_FROM_DBGAP" != True && "$ADD_DBGAP" != "true" ]]; then
+ if [[ -f /mnt/shared/user.yaml ]]; then
+ echo "running fence-create"
+ time fence-create sync --arborist http://arborist-service --yaml /mnt/shared/user.yaml
+ else
+ echo "/mnt/shared/user.yaml did not appear within timeout :-("
+ false # non-zero exit code
+ fi
+ exitcode=$?
+ else
+ output=$(mktemp "/tmp/fence-create-output_XXXXXX")
+ if [[ -f /mnt/shared/user.yaml && "$ONLY_DBGAP" != "true" ]]; then
+ echo "Running fence-create dbgap-sync with user.yaml - see $output"
+ time fence-create sync --arborist http://arborist-service --sync_from_dbgap "True" --projects /var/www/fence/projects.yaml --yaml /mnt/shared/user.yaml 2>&1 | tee "$output"
+ else
+ echo "Running fence-create dbgap-sync without user.yaml - see $output"
+ time fence-create sync --arborist http://arborist-service --sync_from_dbgap "True" --projects /var/www/fence/projects.yaml 2>&1 | tee "$output"
+ fi
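+ # PIPESTATUS[0] captures fence-create's exit code rather than tee's.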
+ exitcode="${PIPESTATUS[0]}"
+ echo "$output"
+ # Echo what files we are seeing on dbgap ftp to Slack
+ # We only do this step every 12 hours and not on weekends to reduce noise
+ if [[ -n "$SLACK_SEND_DBGAP" && "$SLACK_SEND_DBGAP" = True ]]; then
+ files=$(grep "Reading file" "$output")
+ let hour=$(date -u +10#%H)
+ let dow=$(date -u +10#%u)
+ if ! (( hour % 12 )) && (( dow < 6 )); then
+ if [ "${slackWebHook}" != 'None' ]; then
+ curl -X POST --data-urlencode "payload={\"text\": \"FenceHelper: \n\`\`\`\n${files}\n\`\`\`\"}" "${slackWebHook}"
+ fi
+ fi
+ fi
+ fi
+ if [[ $exitcode -ne 0 && "${slackWebHook}" != 'None' ]]; then
+ emptyfile=$(grep "EnvironmentError:" "$output")
+ if [ ! -z "$emptyfile" ]; then
+ curl -X POST --data-urlencode "payload={\"text\": \"JOBSKIPPED: User sync skipped on ${gen3Env} ${emptyfile}\"}" "${slackWebHook}";
+ else
+ curl -X POST --data-urlencode "payload={\"text\": \"JOBFAIL: User sync failed on ${gen3Env}\"}" "${slackWebHook}"
+ fi
+ fi
+ echo "Exit code: $exitcode"
+ exit "$exitcode"
\ No newline at end of file
diff --git a/kube/services/argocd/values.yaml b/kube/services/argocd/values.yaml
new file mode 100644
index 000000000..4d799c055
--- /dev/null
+++ b/kube/services/argocd/values.yaml
@@ -0,0 +1,2894 @@
+## Argo CD configuration
+## Ref: https://github.com/argoproj/argo-cd
+##
+
+# -- Provide a name in place of `argocd`
+nameOverride: argocd
+# -- String to fully override `"argo-cd.fullname"`
+fullnameOverride: ""
+# -- Override the Kubernetes version, which is used to evaluate certain manifests
+kubeVersionOverride: ""
+# Override APIVersions
+# If you want to template helm charts but cannot access k8s API server
+# you can set api versions here
+apiVersionOverrides:
+ # -- String to override apiVersion of cert-manager resources rendered by this helm chart
+ certmanager: "" # cert-manager.io/v1
+ # -- String to override apiVersion of GKE resources rendered by this helm chart
+ cloudgoogle: "" # cloud.google.com/v1
+ # -- String to override apiVersion of autoscaling rendered by this helm chart
+ autoscaling: "" # autoscaling/v2
+
+# -- Create clusterroles that extend existing clusterroles to interact with argo-cd crds
+## Ref: https://kubernetes.io/docs/reference/access-authn-authz/rbac/#aggregated-clusterroles
+createAggregateRoles: false
+
+openshift:
+ # -- enables using arbitrary uid for argo repo server
+ enabled: false
+
+## Custom resource configuration
+crds:
+ # -- Install and upgrade CRDs
+ install: true
+ # -- Keep CRDs on chart uninstall
+ keep: true
+ # -- Annotations to be added to all CRDs
+ annotations: {}
+
+## Globally shared configuration
+global:
+ # -- Common labels for all resources
+ additionalLabels: {}
+ # app: argo-cd
+
+ # -- Number of old deployment ReplicaSets to retain. The rest will be garbage collected.
+ revisionHistoryLimit: 3
+
+ # Default image used by all components
+ image:
+ # -- If defined, a repository applied to all Argo CD deployments
+ repository: quay.io/argoproj/argocd
+ # -- Overrides the global Argo CD image tag whose default is the chart appVersion
+ tag: ""
+ # -- If defined, a imagePullPolicy applied to all Argo CD deployments
+ imagePullPolicy: IfNotPresent
+
+ # -- Secrets with credentials to pull images from a private registry
+ imagePullSecrets: []
+
+ # Default logging options used by all components
+ logging:
+ # -- Set the global logging format. Either: `text` or `json`
+ format: text
+ # -- Set the global logging level. One of: `debug`, `info`, `warn` or `error`
+ level: info
+
+ # -- Annotations for all deployed StatefulSets
+ statefulsetAnnotations: {}
+
+ # -- Annotations for all deployed Deployments
+ deploymentAnnotations: {}
+
+ # -- Annotations for all deployed pods
+ podAnnotations: {}
+
+ # -- Labels for all deployed pods
+ podLabels: {}
+
+ # -- Toggle and define pod-level security context.
+ # @default -- `{}` (See [values.yaml])
+ securityContext: {}
+ # runAsUser: 999
+ # runAsGroup: 999
+ # fsGroup: 999
+
+ # -- Mapping between IP and hostnames that will be injected as entries in the pod's hosts files
+ hostAliases: []
+ # - ip: 10.20.30.40
+ # hostnames:
+ # - git.myhostname
+
+ networkPolicy:
+ # -- Create NetworkPolicy objects for all components
+ create: false
+ # -- Default deny all ingress traffic
+ defaultDenyIngress: false
+
+## Argo Configs
+configs:
+ # General Argo CD configuration
+ ## Ref: https://github.com/argoproj/argo-cd/blob/master/docs/operator-manual/argocd-cm.yaml
+ cm:
+ # -- Create the argocd-cm configmap for [declarative setup]
+ create: true
+
+ # -- Annotations to be added to argocd-cm configmap
+ annotations: {}
+
+ # -- Argo CD's externally facing base URL (optional). Required when configuring SSO
+ url: ""
+
+ # -- The name of tracking label used by Argo CD for resource pruning
+ # @default -- Defaults to app.kubernetes.io/instance
+ application.instanceLabelKey: argocd.argoproj.io/instance
+
+ # -- Enable logs RBAC enforcement
+ ## Ref: https://argo-cd.readthedocs.io/en/latest/operator-manual/upgrading/2.3-2.4/#enable-logs-rbac-enforcement
+ server.rbac.log.enforce.enable: false
+
+ # -- Enable exec feature in Argo UI
+ ## Ref: https://argo-cd.readthedocs.io/en/latest/operator-manual/rbac/#exec-resource
+ exec.enabled: false
+
+ # -- Enable local admin user
+ ## Ref: https://argo-cd.readthedocs.io/en/latest/faq/#how-to-disable-admin-user
+ admin.enabled: true
+
+ # -- Timeout to discover if a new manifests version got published to the repository
+ timeout.reconciliation: 180s
+
+ # -- Timeout to refresh application data as well as target manifests cache
+ timeout.hard.reconciliation: 0s
+
+ # Dex configuration
+ # dex.config: |
+ # connectors:
+ # # GitHub example
+ # - type: github
+ # id: github
+ # name: GitHub
+ # config:
+ # clientID: aabbccddeeff00112233
+ # clientSecret: $dex.github.clientSecret # Alternatively $:dex.github.clientSecret
+ # orgs:
+ # - name: your-github-org
+
+ # OIDC configuration as an alternative to dex (optional).
+ # oidc.config: |
+ # name: AzureAD
+ # issuer: https://login.microsoftonline.com/TENANT_ID/v2.0
+ # clientID: CLIENT_ID
+ # clientSecret: $oidc.azuread.clientSecret
+ # rootCA: |
+ # -----BEGIN CERTIFICATE-----
+ # ... encoded certificate data here ...
+ # -----END CERTIFICATE-----
+ # requestedIDTokenClaims:
+ # groups:
+ # essential: true
+ # requestedScopes:
+ # - openid
+ # - profile
+ # - email
+
+ # Argo CD configuration parameters
+ ## Ref: https://github.com/argoproj/argo-cd/blob/master/docs/operator-manual/argocd-cmd-params-cm.yaml
+ params:
+ # -- Annotations to be added to the argocd-cmd-params-cm ConfigMap
+ annotations: {}
+
+ ## Generic parameters
+ # -- Open-Telemetry collector address: (e.g. "otel-collector:4317")
+ otlp.address: ''
+
+ ## Controller Properties
+ # -- Number of application status processors
+ controller.status.processors: 20
+ # -- Number of application operation processors
+ controller.operation.processors: 10
+ # -- Specifies timeout between application self heal attempts
+ controller.self.heal.timeout.seconds: 5
+ # -- Repo server RPC call timeout seconds.
+ controller.repo.server.timeout.seconds: 60
+
+ ## Server properties
+ # -- Run server without TLS
+ server.insecure: false
+ # -- Value for base href in index.html. Used if Argo CD is running behind reverse proxy under subpath different from /
+ server.basehref: "/argocd/"
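+ # (assumes Argo CD is served under the /argocd/ subpath behind the environment's reverse proxy)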
+ # -- Used if Argo CD is running behind reverse proxy under subpath different from /
+ server.rootpath: ""
+ # -- Directory path that contains additional static assets
+ server.staticassets: /shared/app
+ # -- Disable Argo CD RBAC for user authentication
+ server.disable.auth: false
+ # -- Enable GZIP compression
+ server.enable.gzip: false
+ # -- Set X-Frame-Options header in HTTP responses to value. To disable, set to "".
+ server.x.frame.options: sameorigin
+
+ ## Repo-server properties
+ # -- Limit on the number of concurrent manifest generation requests. Any value less than 1 means no limit.
+ reposerver.parallelism.limit: 0
+
+ # Argo CD RBAC policy configuration
+ ## Ref: https://github.com/argoproj/argo-cd/blob/master/docs/operator-manual/rbac.md
+ rbac:
+ # -- Create the argocd-rbac-cm configmap with ([Argo CD RBAC policy]) definitions.
+ # If false, it is expected the configmap will be created by something else.
+ # Argo CD will not work if there is no configmap created with the name above.
+ create: true
+
+ # -- Annotations to be added to argocd-rbac-cm configmap
+ annotations: {}
+
+ # -- The name of the default role which Argo CD will fall back to when authorizing API requests (optional).
+ # If omitted or empty, users may still be able to log in, but will see no apps, projects, etc...
+ policy.default: ''
+
+ # -- File containing user-defined policies and role definitions.
+ # @default -- `''` (See [values.yaml])
+ policy.csv: ''
+ # Policy rules are in the form:
+ # p, subject, resource, action, object, effect
+ # Role definitions and bindings are in the form:
+ # g, subject, inherited-subject
+ # policy.csv |
+ # p, role:org-admin, applications, *, */*, allow
+ # p, role:org-admin, clusters, get, *, allow
+ # p, role:org-admin, repositories, *, *, allow
+ # p, role:org-admin, logs, get, *, allow
+ # p, role:org-admin, exec, create, */*, allow
+ # g, your-github-org:your-team, role:org-admin
+
+ # -- OIDC scopes to examine during rbac enforcement (in addition to `sub` scope).
+ # The scope value can be a string, or a list of strings.
+ scopes: "[groups]"
+
+ # GnuPG public keys for commit verification
+ ## Ref: https://argo-cd.readthedocs.io/en/stable/user-guide/gpg-verification/
+ gpg:
+ # -- Annotations to be added to argocd-gpg-keys-cm configmap
+ annotations: {}
+
+ # -- [GnuPG] public keys to add to the keyring
+ # @default -- `{}` (See [values.yaml])
+ ## Note: Public keys should be exported with `gpg --export --armor <KeyID>`
+ keys: {}
+ # 4AEE18F83AFDEB23: |
+ # -----BEGIN PGP PUBLIC KEY BLOCK-----
+ # ...
+ # -----END PGP PUBLIC KEY BLOCK-----
+
+
+ # -- Provide one or multiple [external cluster credentials]
+ # @default -- `[]` (See [values.yaml])
+ ## Ref:
+ ## - https://argo-cd.readthedocs.io/en/stable/operator-manual/declarative-setup/#clusters
+ ## - https://argo-cd.readthedocs.io/en/stable/operator-manual/security/#external-cluster-credentials
+ clusterCredentials: []
+ # - name: mycluster
+ # server: https://mycluster.com
+ # labels: {}
+ # annotations: {}
+ # config:
+ # bearerToken: ""
+ # tlsClientConfig:
+ # insecure: false
+ # caData: ""
+ # - name: mycluster2
+ # server: https://mycluster2.com
+ # labels: {}
+ # annotations: {}
+ # namespaces: namespace1,namespace2
+ # clusterResources: true
+ # config:
+ # bearerToken: ""
+ # tlsClientConfig:
+ # insecure: false
+ # caData: ""
+
+ # -- Known Hosts configmap annotations
+ knownHostsAnnotations: {}
+ knownHosts:
+ data:
+ # -- Known Hosts
+ # @default -- See [values.yaml]
+ ssh_known_hosts: |
+ bitbucket.org ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAubiN81eDcafrgMeLzaFPsw2kNvEcqTKl/VqLat/MaB33pZy0y3rJZtnqwR2qOOvbwKZYKiEO1O6VqNEBxKvJJelCq0dTXWT5pbO2gDXC6h6QDXCaHo6pOHGPUy+YBaGQRGuSusMEASYiWunYN0vCAI8QaXnWMXNMdFP3jHAJH0eDsoiGnLPBlBp4TNm6rYI74nMzgz3B9IikW4WVK+dc8KZJZWYjAuORU3jc1c/NPskD2ASinf8v3xnfXeukU0sJ5N6m5E8VLjObPEO+mN2t/FZTMZLiFqPWc/ALSqnMnnhwrNi2rbfg/rd/IpL8Le3pSBne8+seeFVBoGqzHM9yXw==
+ github.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg=
+ github.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl
+ github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==
+ gitlab.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFSMqzJeV9rUzU4kWitGjeR4PWSa29SPqJ1fVkhtj3Hw9xjLVXVYrU9QlYWrOLXBpQ6KWjbjTDTdDkoohFzgbEY=
+ gitlab.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAfuCHKVTjquxvt6CM6tdG4SLp1Btn/nOeHHE5UOzRdf
+ gitlab.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCsj2bNKTBSpIYDEGk9KxsGh3mySTRgMtXL583qmBpzeQ+jqCMRgBqB98u3z++J1sKlXHWfM9dyhSevkMwSbhoR8XIq/U0tCNyokEi/ueaBMCvbcTHhO7FcwzY92WK4Yt0aGROY5qX2UKSeOvuP4D6TPqKF1onrSzH9bx9XUf2lEdWT/ia1NEKjunUqu1xOB/StKDHMoX4/OKyIzuS0q/T1zOATthvasJFoPrAjkohTyaDUz2LN5JoH839hViyEG82yB+MjcFV5MU3N1l1QL3cVUCh93xSaua1N85qivl+siMkPGbO5xR/En4iEY6K2XPASUEMaieWVNTRCtJ4S8H+9
+ ssh.dev.azure.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC7Hr1oTWqNqOlzGJOfGJ4NakVyIzf1rXYd4d7wo6jBlkLvCA4odBlL0mDUyZ0/QUfTTqeu+tm22gOsv+VrVTMk6vwRU75gY/y9ut5Mb3bR5BV58dKXyq9A9UeB5Cakehn5Zgm6x1mKoVyf+FFn26iYqXJRgzIZZcZ5V6hrE0Qg39kZm4az48o0AUbf6Sp4SLdvnuMa2sVNwHBboS7EJkm57XQPVU3/QpyNLHbWDdzwtrlS+ez30S3AdYhLKEOxAG8weOnyrtLJAUen9mTkol8oII1edf7mWWbWVf0nBmly21+nZcmCTISQBtdcyPaEno7fFQMDD26/s0lfKob4Kw8H
+ vs-ssh.visualstudio.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC7Hr1oTWqNqOlzGJOfGJ4NakVyIzf1rXYd4d7wo6jBlkLvCA4odBlL0mDUyZ0/QUfTTqeu+tm22gOsv+VrVTMk6vwRU75gY/y9ut5Mb3bR5BV58dKXyq9A9UeB5Cakehn5Zgm6x1mKoVyf+FFn26iYqXJRgzIZZcZ5V6hrE0Qg39kZm4az48o0AUbf6Sp4SLdvnuMa2sVNwHBboS7EJkm57XQPVU3/QpyNLHbWDdzwtrlS+ez30S3AdYhLKEOxAG8weOnyrtLJAUen9mTkol8oII1edf7mWWbWVf0nBmly21+nZcmCTISQBtdcyPaEno7fFQMDD26/s0lfKob4Kw8H
+ # -- TLS certificate configmap annotations
+ tlsCertsAnnotations: {}
+ # -- TLS certificate
+ # @default -- See [values.yaml]
+ tlsCerts:
+ {}
+ # data:
+ # argocd.example.com: |
+ # -----BEGIN CERTIFICATE-----
+ # MIIF1zCCA7+gAwIBAgIUQdTcSHY2Sxd3Tq/v1eIEZPCNbOowDQYJKoZIhvcNAQEL
+ # BQAwezELMAkGA1UEBhMCREUxFTATBgNVBAgMDExvd2VyIFNheG9ueTEQMA4GA1UE
+ # BwwHSGFub3ZlcjEVMBMGA1UECgwMVGVzdGluZyBDb3JwMRIwEAYDVQQLDAlUZXN0
+ # c3VpdGUxGDAWBgNVBAMMD2Jhci5leGFtcGxlLmNvbTAeFw0xOTA3MDgxMzU2MTda
+ # Fw0yMDA3MDcxMzU2MTdaMHsxCzAJBgNVBAYTAkRFMRUwEwYDVQQIDAxMb3dlciBT
+ # YXhvbnkxEDAOBgNVBAcMB0hhbm92ZXIxFTATBgNVBAoMDFRlc3RpbmcgQ29ycDES
+ # MBAGA1UECwwJVGVzdHN1aXRlMRgwFgYDVQQDDA9iYXIuZXhhbXBsZS5jb20wggIi
+ # MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCv4mHMdVUcafmaSHVpUM0zZWp5
+ # NFXfboxA4inuOkE8kZlbGSe7wiG9WqLirdr39Ts+WSAFA6oANvbzlu3JrEQ2CHPc
+ # CNQm6diPREFwcDPFCe/eMawbwkQAPVSHPts0UoRxnpZox5pn69ghncBR+jtvx+/u
+ # P6HdwW0qqTvfJnfAF1hBJ4oIk2AXiip5kkIznsAh9W6WRy6nTVCeetmIepDOGe0G
+ # ZJIRn/OfSz7NzKylfDCat2z3EAutyeT/5oXZoWOmGg/8T7pn/pR588GoYYKRQnp+
+ # YilqCPFX+az09EqqK/iHXnkdZ/Z2fCuU+9M/Zhrnlwlygl3RuVBI6xhm/ZsXtL2E
+ # Gxa61lNy6pyx5+hSxHEFEJshXLtioRd702VdLKxEOuYSXKeJDs1x9o6cJ75S6hko
+ # Ml1L4zCU+xEsMcvb1iQ2n7PZdacqhkFRUVVVmJ56th8aYyX7KNX6M9CD+kMpNm6J
+ # kKC1li/Iy+RI138bAvaFplajMF551kt44dSvIoJIbTr1LigudzWPqk31QaZXV/4u
+ # kD1n4p/XMc9HYU/was/CmQBFqmIZedTLTtK7clkuFN6wbwzdo1wmUNgnySQuMacO
+ # gxhHxxzRWxd24uLyk9Px+9U3BfVPaRLiOPaPoC58lyVOykjSgfpgbus7JS69fCq7
+ # bEH4Jatp/10zkco+UQIDAQABo1MwUTAdBgNVHQ4EFgQUjXH6PHi92y4C4hQpey86
+ # r6+x1ewwHwYDVR0jBBgwFoAUjXH6PHi92y4C4hQpey86r6+x1ewwDwYDVR0TAQH/
+ # BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAgEAFE4SdKsX9UsLy+Z0xuHSxhTd0jfn
+ # Iih5mtzb8CDNO5oTw4z0aMeAvpsUvjJ/XjgxnkiRACXh7K9hsG2r+ageRWGevyvx
+ # CaRXFbherV1kTnZw4Y9/pgZTYVWs9jlqFOppz5sStkfjsDQ5lmPJGDii/StENAz2
+ # XmtiPOgfG9Upb0GAJBCuKnrU9bIcT4L20gd2F4Y14ccyjlf8UiUi192IX6yM9OjT
+ # +TuXwZgqnTOq6piVgr+FTSa24qSvaXb5z/mJDLlk23npecTouLg83TNSn3R6fYQr
+ # d/Y9eXuUJ8U7/qTh2Ulz071AO9KzPOmleYPTx4Xty4xAtWi1QE5NHW9/Ajlv5OtO
+ # OnMNWIs7ssDJBsB7VFC8hcwf79jz7kC0xmQqDfw51Xhhk04kla+v+HZcFW2AO9so
+ # 6ZdVHHQnIbJa7yQJKZ+hK49IOoBR6JgdB5kymoplLLiuqZSYTcwSBZ72FYTm3iAr
+ # jzvt1hxpxVDmXvRnkhRrIRhK4QgJL0jRmirBjDY+PYYd7bdRIjN7WNZLFsgplnS8
+ # 9w6CwG32pRlm0c8kkiQ7FXA6BYCqOsDI8f1VGQv331OpR2Ck+FTv+L7DAmg6l37W
+ # +LB9LGh4OAp68ImTjqf6ioGKG0RBSznwME+r4nXtT1S/qLR6ASWUS4ViWRhbRlNK
+ # XWyb96wrUlv+E8I=
+ # -----END CERTIFICATE-----
+
+ # -- Repository credentials to be used as Templates for other repos
+ ## Creates a secret for each key/value specified below to create repository credentials
+ credentialTemplates: {}
+ # github-enterprise-creds-1:
+ # url: https://github.com/argoproj
+ # githubAppID: 1
+ # githubAppInstallationID: 2
+ # githubAppEnterpriseBaseUrl: https://ghe.example.com/api/v3
+ # githubAppPrivateKey: |
+ # -----BEGIN OPENSSH PRIVATE KEY-----
+ # ...
+ # -----END OPENSSH PRIVATE KEY-----
+ # https-creds:
+ # url: https://github.com/argoproj
+ # password: my-password
+ # username: my-username
+ # ssh-creds:
+ # url: git@github.com:argoproj-labs
+ # sshPrivateKey: |
+ # -----BEGIN OPENSSH PRIVATE KEY-----
+ # ...
+ # -----END OPENSSH PRIVATE KEY-----
+
+ # -- Annotations to be added to `configs.credentialTemplates` Secret
+ credentialTemplatesAnnotations: {}
+
+ # -- Repositories list to be used by applications
+ ## Creates a secret for each key/value specified below to create repositories
+ ## Note: the last example in the list would use a repository credential template, configured under "configs.repositoryCredentials".
+ repositories: {}
+ # istio-helm-repo:
+ # url: https://storage.googleapis.com/istio-prerelease/daily-build/master-latest-daily/charts
+ # name: istio.io
+ # type: helm
+ # private-helm-repo:
+ # url: https://my-private-chart-repo.internal
+ # name: private-repo
+ # type: helm
+ # password: my-password
+ # username: my-username
+ # private-repo:
+ # url: https://github.com/argoproj/private-repo
+
+ # -- Annotations to be added to `configs.repositories` Secret
+ repositoriesAnnotations: {}
+
+ # Argo CD sensitive data
+ # Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/user-management/#sensitive-data-and-sso-client-secrets
+ secret:
+ # -- Create the argocd-secret
+ createSecret: true
+ # -- Annotations to be added to argocd-secret
+ annotations: {}
+
+ # -- Shared secret for authenticating GitHub webhook events
+ githubSecret: ""
+ # -- Shared secret for authenticating GitLab webhook events
+ gitlabSecret: ""
+ # -- Shared secret for authenticating BitbucketServer webhook events
+ bitbucketServerSecret: ""
+ # -- UUID for authenticating Bitbucket webhook events
+ bitbucketUUID: ""
+ # -- Shared secret for authenticating Gogs webhook events
+ gogsSecret: ""
+
+ # -- add additional secrets to be added to argocd-secret
+ ## Custom secrets. Useful for injecting SSO secrets into environment variables.
+ ## Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/user-management/#sensitive-data-and-sso-client-secrets
+ ## Note that all values must be non-empty.
+ extra:
+ {}
+ # LDAP_PASSWORD: "mypassword"
+
+ # -- Argo TLS Data
+ # DEPRECATED - Use server.certificate or server.certificateSecret
+ # argocdServerTlsConfig:
+ # key: ''
+ # crt: ''
+
+ # -- Bcrypt hashed admin password
+ ## Argo expects the password in the secret to be bcrypt hashed. You can create this hash with
+ ## `htpasswd -nbBC 10 "" $ARGO_PWD | tr -d ':\n' | sed 's/$2y/$2a/'`
+ argocdServerAdminPassword: ""
+ # -- Admin password modification time. Eg. `"2006-01-02T15:04:05Z"`
+ # @default -- `""` (defaults to current time)
+ argocdServerAdminPasswordMtime: ""
+
+ # -- Define custom [CSS styles] for your argo instance.
+ # This setting will automatically mount the provided CSS and reference it in the argo configuration.
+ # @default -- `""` (See [values.yaml])
+ ## Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/custom-styles/
+ styles: ""
+ # styles: |
+ # .nav-bar {
+ # background: linear-gradient(to bottom, #999, #777, #333, #222, #111);
+ # }
+
+# -- Array of extra K8s manifests to deploy
+extraObjects: []
+ # - apiVersion: secrets-store.csi.x-k8s.io/v1
+ # kind: SecretProviderClass
+ # metadata:
+ # name: argocd-secrets-store
+ # spec:
+ # provider: aws
+ # parameters:
+ # objects: |
+ # - objectName: "argocd"
+ # objectType: "secretsmanager"
+ # jmesPath:
+ # - path: "client_id"
+ # objectAlias: "client_id"
+ # - path: "client_secret"
+ # objectAlias: "client_secret"
+ # secretObjects:
+ # - data:
+ # - key: client_id
+ # objectName: client_id
+ # - key: client_secret
+ # objectName: client_secret
+ # secretName: argocd-secrets-store
+ # type: Opaque
+ # labels:
+ # app.kubernetes.io/part-of: argocd
+
+## Application controller
+controller:
+ # -- Application controller name string
+ name: application-controller
+
+ # -- The number of application controller pods to run.
+ # Additional replicas will cause sharding of managed clusters across number of replicas.
+ replicas: 1
+
+ ## Application controller Pod Disruption Budget
+ ## Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
+ pdb:
+ # -- Deploy a [PodDisruptionBudget] for the application controller
+ enabled: false
+ # -- Labels to be added to application controller pdb
+ labels: {}
+ # -- Annotations to be added to application controller pdb
+ annotations: {}
+ # -- Number of pods that are available after eviction as number or percentage (eg.: 50%)
+ # @default -- `""` (defaults to 0 if not specified)
+ minAvailable: ""
+ # -- Number of pods that are unavailable after eviction as number or percentage (eg.: 50%).
+ ## Has higher precedence over `controller.pdb.minAvailable`
+ maxUnavailable: ""
+
+ ## Application controller image
+ image:
+ # -- Repository to use for the application controller
+ # @default -- `""` (defaults to global.image.repository)
+ repository: ""
+ # -- Tag to use for the application controller
+ # @default -- `""` (defaults to global.image.tag)
+ tag: ""
+ # -- Image pull policy for the application controller
+ # @default -- `""` (defaults to global.image.imagePullPolicy)
+ imagePullPolicy: ""
+
+ # -- Secrets with credentials to pull images from a private registry
+ # @default -- `[]` (defaults to global.imagePullSecrets)
+ imagePullSecrets: []
+
+ # -- DEPRECATED - Application controller commandline flags
+ args: {}
+ # DEPRECATED - Use configs.params to override
+ # # -- define the application controller `--status-processors`
+ # statusProcessors: "20"
+ # # -- define the application controller `--operation-processors`
+ # operationProcessors: "10"
+ # # -- define the application controller `--app-hard-resync`
+ # appHardResyncPeriod: "0"
+ # # -- define the application controller `--app-resync`
+ # appResyncPeriod: "180"
+ # # -- define the application controller `--self-heal-timeout-seconds`
+ # selfHealTimeout: "5"
+ # # -- define the application controller `--repo-server-timeout-seconds`
+ # repoServerTimeoutSeconds: "60"
+
+ # -- Additional command line arguments to pass to application controller
+ extraArgs: []
+
+ # -- Environment variables to pass to application controller
+ env: []
+
+ # -- envFrom to pass to application controller
+ # @default -- `[]` (See [values.yaml])
+ envFrom: []
+ # - configMapRef:
+ # name: config-map-name
+ # - secretRef:
+ # name: secret-name
+
+ # -- Additional containers to be added to the application controller pod
+ extraContainers: []
+
+ # -- Init containers to add to the application controller pod
+ ## If your target Kubernetes cluster(s) require a custom credential (exec) plugin
+ ## you could use this (and the same in the server pod) to provide such executable
+ ## Ref: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#client-go-credential-plugins
+ initContainers: []
+ # - name: download-tools
+ # image: alpine:3
+ # command: [sh, -c]
+ # args:
+ # - wget -qO kubelogin.zip https://github.com/Azure/kubelogin/releases/download/v0.0.25/kubelogin-linux-amd64.zip &&
+ # unzip kubelogin.zip && mv bin/linux_amd64/kubelogin /custom-tools/
+ # volumeMounts:
+ # - mountPath: /custom-tools
+ # name: custom-tools
+
+ # -- Additional volumeMounts to the application controller main container
+ volumeMounts: []
+ # - mountPath: /usr/local/bin/kubelogin
+ # name: custom-tools
+ # subPath: kubelogin
+
+ # -- Additional volumes to the application controller pod
+ volumes: []
+ # - name: custom-tools
+ # emptyDir: {}
+
+ # -- Annotations for the application controller StatefulSet
+ statefulsetAnnotations: {}
+
+ # -- Annotations to be added to application controller pods
+ podAnnotations: {}
+
+ # -- Labels to be added to application controller pods
+ podLabels: {}
+
+ # -- Resource limits and requests for the application controller pods
+ resources: {}
+ # limits:
+ # cpu: 500m
+ # memory: 512Mi
+ # requests:
+ # cpu: 250m
+ # memory: 256Mi
+
+ # -- Application controller container-level security context
+ # @default -- See [values.yaml]
+ containerSecurityContext:
+ runAsNonRoot: true
+ readOnlyRootFilesystem: true
+ allowPrivilegeEscalation: false
+ seccompProfile:
+ type: RuntimeDefault
+ capabilities:
+ drop:
+ - ALL
+
+ # -- Application controller listening port
+ containerPort: 8082
+
+ # Readiness probe for application controller
+ ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
+ readinessProbe:
+ # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded
+ failureThreshold: 3
+ # -- Number of seconds after the container has started before [probe] is initiated
+ initialDelaySeconds: 10
+ # -- How often (in seconds) to perform the [probe]
+ periodSeconds: 10
+ # -- Minimum consecutive successes for the [probe] to be considered successful after having failed
+ successThreshold: 1
+ # -- Number of seconds after which the [probe] times out
+ timeoutSeconds: 1
+
+ # -- [Node selector]
+ nodeSelector: {}
+
+ # -- [Tolerations] for use with node taints
+ tolerations: []
+
+ # -- Assign custom [affinity] rules to the deployment
+ affinity: {}
+
+ # -- Assign custom [TopologySpreadConstraints] rules to the application controller
+ ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
+ ## If labelSelector is left out, it will default to the labelSelector configuration of the deployment
+ topologySpreadConstraints: []
+ # - maxSkew: 1
+ # topologyKey: topology.kubernetes.io/zone
+ # whenUnsatisfiable: DoNotSchedule
+
+ # -- Priority class for the application controller pods
+ priorityClassName: ""
+
+ serviceAccount:
+ # -- Create a service account for the application controller
+ create: true
+ # -- Service account name
+ name: argocd-application-controller
+ # -- Annotations applied to created service account
+ annotations: {}
+ # -- Labels applied to created service account
+ labels: {}
+ # -- Automount API credentials for the Service Account
+ automountServiceAccountToken: true
+
+ ## Application controller metrics configuration
+ metrics:
+ # -- Deploy metrics service
+ enabled: false
+ applicationLabels:
+ # -- Enables additional labels in argocd_app_labels metric
+ enabled: false
+ # -- Additional labels
+ labels: []
+ service:
+ # -- Metrics service annotations
+ annotations: {}
+ # -- Metrics service labels
+ labels: {}
+ # -- Metrics service port
+ servicePort: 8082
+ # -- Metrics service port name
+ portName: http-metrics
+ serviceMonitor:
+ # -- Enable a prometheus ServiceMonitor
+ enabled: false
+ # -- Prometheus ServiceMonitor interval
+ interval: 30s
+ # -- Prometheus [RelabelConfigs] to apply to samples before scraping
+ relabelings: []
+ # -- Prometheus [MetricRelabelConfigs] to apply to samples before ingestion
+ metricRelabelings: []
+ # -- Prometheus ServiceMonitor selector
+ selector: {}
+ # prometheus: kube-prometheus
+
+ # -- Prometheus ServiceMonitor scheme
+ scheme: ""
+ # -- Prometheus ServiceMonitor tlsConfig
+ tlsConfig: {}
+ # -- Prometheus ServiceMonitor namespace
+ namespace: "" # "monitoring"
+ # -- Prometheus ServiceMonitor labels
+ additionalLabels: {}
+ # -- Prometheus ServiceMonitor annotations
+ annotations: {}
+ rules:
+ # -- Deploy a PrometheusRule for the application controller
+ enabled: false
+ # -- PrometheusRule.Spec for the application controller
+ spec: []
+ # - alert: ArgoAppMissing
+ # expr: |
+ # absent(argocd_app_info) == 1
+ # for: 15m
+ # labels:
+ # severity: critical
+ # annotations:
+ # summary: "[Argo CD] No reported applications"
+ # description: >
+ # Argo CD has not reported any applications data for the past 15 minutes which
+ # means that it must be down or not functioning properly. This needs to be
+ # resolved for this cloud to continue to maintain state.
+ # - alert: ArgoAppNotSynced
+ # expr: |
+ # argocd_app_info{sync_status!="Synced"} == 1
+ # for: 12h
+ # labels:
+ # severity: warning
+ # annotations:
+ # summary: "[{{`{{$labels.name}}`}}] Application not synchronized"
+ # description: >
+ # The application [{{`{{$labels.name}}`}} has not been synchronized for over
+ # 12 hours which means that the state of this cloud has drifted away from the
+ # state inside Git.
+ # selector:
+ # prometheus: kube-prometheus
+ # namespace: monitoring
+ # additionalLabels: {}
+ # annotations: {}
+
+ ## Enable if you would like to grant rights to Argo CD to deploy to the local Kubernetes cluster.
+ clusterAdminAccess:
+ # -- Enable RBAC for local cluster deployments
+ enabled: true
+
+ ## Enable this and set the rules: to whatever custom rules you want for the Cluster Role resource.
+ ## Defaults to off
+ clusterRoleRules:
+ # -- Enable custom rules for the application controller's ClusterRole resource
+ enabled: false
+ # -- List of custom rules for the application controller's ClusterRole resource
+ rules: []
+
+## Dex
+dex:
+ # -- Enable dex
+ enabled: false
+ # -- Dex name
+ name: dex-server
+
+ # -- Additional command line arguments to pass to the Dex server
+ extraArgs: []
+
+ metrics:
+ # -- Deploy metrics service
+ enabled: false
+ service:
+ # -- Metrics service annotations
+ annotations: {}
+ # -- Metrics service labels
+ labels: {}
+ # -- Metrics service port name
+ portName: http-metrics
+ serviceMonitor:
+ # -- Enable a prometheus ServiceMonitor
+ enabled: false
+ # -- Prometheus ServiceMonitor interval
+ interval: 30s
+ # -- Prometheus [RelabelConfigs] to apply to samples before scraping
+ relabelings: []
+ # -- Prometheus [MetricRelabelConfigs] to apply to samples before ingestion
+ metricRelabelings: []
+ # -- Prometheus ServiceMonitor selector
+ selector: {}
+ # prometheus: kube-prometheus
+
+ # -- Prometheus ServiceMonitor scheme
+ scheme: ""
+ # -- Prometheus ServiceMonitor tlsConfig
+ tlsConfig: {}
+ # -- Prometheus ServiceMonitor namespace
+ namespace: "" # "monitoring"
+ # -- Prometheus ServiceMonitor labels
+ additionalLabels: {}
+ # -- Prometheus ServiceMonitor annotations
+ annotations: {}
+
+ ## Dex Pod Disruption Budget
+ ## Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
+ pdb:
+ # -- Deploy a [PodDisruptionBudget] for the Dex server
+ enabled: false
+ # -- Labels to be added to Dex server pdb
+ labels: {}
+ # -- Annotations to be added to Dex server pdb
+ annotations: {}
+ # -- Number of pods that are available after eviction as number or percentage (eg.: 50%)
+ # @default -- `""` (defaults to 0 if not specified)
+ minAvailable: ""
+ # -- Number of pods that are unavailable after eviction as number or percentage (eg.: 50%).
+ ## Has higher precedence over `dex.pdb.minAvailable`
+ maxUnavailable: ""
+
+ ## Dex image
+ image:
+ # -- Dex image repository
+ repository: ghcr.io/dexidp/dex
+ # -- Dex image tag
+ tag: v2.35.3
+ # -- Dex imagePullPolicy
+ # @default -- `""` (defaults to global.image.imagePullPolicy)
+ imagePullPolicy: ""
+
+ # -- Secrets with credentials to pull images from a private registry
+ # @default -- `[]` (defaults to global.imagePullSecrets)
+ imagePullSecrets: []
+
+ # Argo CD init image that creates Dex config
+ initImage:
+ # -- Argo CD init image repository
+ # @default -- `""` (defaults to global.image.repository)
+ repository: ""
+ # -- Argo CD init image tag
+ # @default -- `""` (defaults to global.image.tag)
+ tag: ""
+ # -- Argo CD init image imagePullPolicy
+ # @default -- `""` (defaults to global.image.imagePullPolicy)
+ imagePullPolicy: ""
+
+ # -- Environment variables to pass to the Dex server
+ env: []
+
+ # -- envFrom to pass to the Dex server
+ # @default -- `[]` (See [values.yaml])
+ envFrom: []
+ # - configMapRef:
+ # name: config-map-name
+ # - secretRef:
+ # name: secret-name
+
+ # -- Additional containers to be added to the dex pod
+ extraContainers: []
+
+ # -- Init containers to add to the dex pod
+ initContainers: []
+
+ # -- Additional volumeMounts to the dex main container
+ volumeMounts: []
+
+ # -- Additional volumes to the dex pod
+ volumes: []
+
+ # TLS certificate configuration via Secret
+ ## Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/tls/#configuring-tls-to-argocd-dex-server
+ ## Note: Issuing certificates via cert-manager is not supported right now because it's not possible to restart Dex automatically without extra controllers.
+ certificateSecret:
+ # -- Create argocd-dex-server-tls secret
+ enabled: false
+ # -- Labels to be added to argocd-dex-server-tls secret
+ labels: {}
+ # -- Annotations to be added to argocd-dex-server-tls secret
+ annotations: {}
+ # -- Certificate authority. Required for self-signed certificates.
+ ca: ''
+ # -- Certificate private key
+ key: ''
+ # -- Certificate data. Must contain SANs of Dex service (ie: argocd-dex-server, argocd-dex-server.argo-cd.svc)
+ crt: ''
+
+ # -- Annotations to be added to the Dex server Deployment
+ deploymentAnnotations: {}
+
+ # -- Annotations to be added to the Dex server pods
+ podAnnotations: {}
+
+ # -- Labels to be added to the Dex server pods
+ podLabels: {}
+
+ # -- Resource limits and requests for dex
+ resources: {}
+ # limits:
+ # cpu: 50m
+ # memory: 64Mi
+ # requests:
+ # cpu: 10m
+ # memory: 32Mi
+
+ # -- Dex container-level security context
+ # @default -- See [values.yaml]
+ containerSecurityContext:
+ runAsNonRoot: true
+ readOnlyRootFilesystem: true
+ allowPrivilegeEscalation: false
+ seccompProfile:
+ type: RuntimeDefault
+ capabilities:
+ drop:
+ - ALL
+
+ ## Probes for Dex server
+ ## Supported from Dex >= 2.28.0
+ livenessProbe:
+ # -- Enable Kubernetes liveness probe for Dex >= 2.28.0
+ enabled: false
+ # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded
+ failureThreshold: 3
+ # -- Number of seconds after the container has started before [probe] is initiated
+ initialDelaySeconds: 10
+ # -- How often (in seconds) to perform the [probe]
+ periodSeconds: 10
+ # -- Minimum consecutive successes for the [probe] to be considered successful after having failed
+ successThreshold: 1
+ # -- Number of seconds after which the [probe] times out
+ timeoutSeconds: 1
+ readinessProbe:
+ # -- Enable Kubernetes readiness probe for Dex >= 2.28.0
+ enabled: false
+ # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded
+ failureThreshold: 3
+ # -- Number of seconds after the container has started before [probe] is initiated
+ initialDelaySeconds: 10
+ # -- How often (in seconds) to perform the [probe]
+ periodSeconds: 10
+ # -- Minimum consecutive successes for the [probe] to be considered successful after having failed
+ successThreshold: 1
+ # -- Number of seconds after which the [probe] times out
+ timeoutSeconds: 1
+
+ serviceAccount:
+ # -- Create dex service account
+ create: true
+ # -- Dex service account name
+ name: argocd-dex-server
+ # -- Annotations applied to created service account
+ annotations: {}
+ # -- Automount API credentials for the Service Account
+ automountServiceAccountToken: true
+
+ # -- Container port for HTTP access
+ containerPortHttp: 5556
+ # -- Service port for HTTP access
+ servicePortHttp: 5556
+ # -- Service port name for HTTP access
+ servicePortHttpName: http
+ # -- Container port for gRPC access
+ containerPortGrpc: 5557
+ # -- Service port for gRPC access
+ servicePortGrpc: 5557
+ # -- Service port name for gRPC access
+ servicePortGrpcName: grpc
+ # -- Container port for metrics access
+ containerPortMetrics: 5558
+ # -- Service port for metrics access
+ servicePortMetrics: 5558
+
+ # -- [Node selector]
+ nodeSelector: {}
+ # -- [Tolerations] for use with node taints
+ tolerations: []
+ # -- Assign custom [affinity] rules to the deployment
+ affinity: {}
+
+ # -- Assign custom [TopologySpreadConstraints] rules to dex
+ ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
+ ## If labelSelector is left out, it will default to the labelSelector configuration of the deployment
+ topologySpreadConstraints: []
+ # - maxSkew: 1
+ # topologyKey: topology.kubernetes.io/zone
+ # whenUnsatisfiable: DoNotSchedule
+
+ # -- Priority class for dex
+ priorityClassName: ""
+
+## Redis
+redis:
+ # -- Enable redis
+ enabled: true
+ # -- Redis name
+ name: redis
+
+ ## Redis Pod Disruption Budget
+ ## Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
+ pdb:
+ # -- Deploy a [PodDisruptionBudget] for the Redis
+ enabled: false
+ # -- Labels to be added to Redis pdb
+ labels: {}
+ # -- Annotations to be added to Redis pdb
+ annotations: {}
+ # -- Number of pods that are available after eviction as number or percentage (eg.: 50%)
+ # @default -- `""` (defaults to 0 if not specified)
+ minAvailable: ""
+ # -- Number of pods that are unavailable after eviction as number or percentage (eg.: 50%).
+ ## Has higher precedence over `redis.pdb.minAvailable`
+ maxUnavailable: ""
+
+ ## Redis image
+ image:
+ # -- Redis repository
+ repository: public.ecr.aws/docker/library/redis
+ # -- Redis tag
+ tag: 7.0.5-alpine
+ # -- Redis imagePullPolicy
+ imagePullPolicy: IfNotPresent
+
+ # -- Secrets with credentials to pull images from a private registry
+ # @default -- `[]` (defaults to global.imagePullSecrets)
+ imagePullSecrets: []
+
+ # -- Additional command line arguments to pass to redis-server
+ extraArgs: []
+ # - --bind
+ # - "0.0.0.0"
+
+ # -- Environment variables to pass to the Redis server
+ env: []
+
+ # -- envFrom to pass to the Redis server
+ # @default -- `[]` (See [values.yaml])
+ envFrom: []
+ # - configMapRef:
+ # name: config-map-name
+ # - secretRef:
+ # name: secret-name
+
+ # -- Additional containers to be added to the redis pod
+ extraContainers: []
+
+ # -- Init containers to add to the redis pod
+ initContainers: []
+
+ # -- Additional volumeMounts to the redis container
+ volumeMounts: []
+
+ # -- Additional volumes to the redis pod
+ volumes: []
+
+ # -- Annotations to be added to the Redis server Deployment
+ deploymentAnnotations: {}
+
+ # -- Annotations to be added to the Redis server pods
+ podAnnotations: {}
+
+ # -- Labels to be added to the Redis server pods
+ podLabels: {}
+
+ # -- Resource limits and requests for redis
+ resources: {}
+ # limits:
+ # cpu: 200m
+ # memory: 128Mi
+ # requests:
+ # cpu: 100m
+ # memory: 64Mi
+
+ # -- Redis pod-level security context
+ # @default -- See [values.yaml]
+ securityContext:
+ runAsNonRoot: true
+ runAsUser: 999
+ seccompProfile:
+ type: RuntimeDefault
+
+ # -- Redis container-level security context
+ # @default -- See [values.yaml]
+ containerSecurityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
+
+ # -- Redis container port
+ containerPort: 6379
+ # -- Redis service port
+ servicePort: 6379
+
+ # -- [Node selector]
+ nodeSelector: {}
+
+ # -- [Tolerations] for use with node taints
+ tolerations: []
+
+ # -- Assign custom [affinity] rules to the deployment
+ affinity: {}
+
+ # -- Assign custom [TopologySpreadConstraints] rules to redis
+ ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
+ ## If labelSelector is left out, it will default to the labelSelector configuration of the deployment
+ topologySpreadConstraints: []
+ # - maxSkew: 1
+ # topologyKey: topology.kubernetes.io/zone
+ # whenUnsatisfiable: DoNotSchedule
+
+ # -- Priority class for redis
+ priorityClassName: ""
+
+ serviceAccount:
+ # -- Create a service account for the redis pod
+ create: false
+ # -- Service account name for redis pod
+ name: ""
+ # -- Annotations applied to created service account
+ annotations: {}
+ # -- Automount API credentials for the Service Account
+ automountServiceAccountToken: false
+
+ service:
+ # -- Redis service annotations
+ annotations: {}
+ # -- Additional redis service labels
+ labels: {}
+
+ metrics:
+ # -- Deploy metrics service and redis-exporter sidecar
+ enabled: false
+ image:
+ # -- redis-exporter image repository
+ repository: public.ecr.aws/bitnami/redis-exporter
+ # -- redis-exporter image tag
+ tag: 1.26.0-debian-10-r2
+ # -- redis-exporter image PullPolicy
+ imagePullPolicy: IfNotPresent
+ # -- Port to use for redis-exporter sidecar
+ containerPort: 9121
+
+ # -- Redis exporter security context
+ # @default -- See [values.yaml]
+ containerSecurityContext:
+ runAsNonRoot: true
+ readOnlyRootFilesystem: true
+ allowPrivilegeEscalation: false
+ seccompProfile:
+ type: RuntimeDefault
+ capabilities:
+ drop:
+ - ALL
+
+ # -- Resource limits and requests for redis-exporter sidecar
+ resources: {}
+ # limits:
+ # cpu: 50m
+ # memory: 64Mi
+ # requests:
+ # cpu: 10m
+ # memory: 32Mi
+ service:
+ # -- Metrics service type
+ type: ClusterIP
+ # -- Metrics service clusterIP. `None` makes a "headless service" (no virtual IP)
+ clusterIP: None
+ # -- Metrics service annotations
+ annotations: {}
+ # -- Metrics service labels
+ labels: {}
+ # -- Metrics service port
+ servicePort: 9121
+ # -- Metrics service port name
+ portName: http-metrics
+ serviceMonitor:
+ # -- Enable a prometheus ServiceMonitor
+ enabled: false
+ # -- Interval at which metrics should be scraped
+ interval: 30s
+ # -- Prometheus [RelabelConfigs] to apply to samples before scraping
+ relabelings: []
+ # -- Prometheus [MetricRelabelConfigs] to apply to samples before ingestion
+ metricRelabelings: []
+ # -- Prometheus ServiceMonitor selector
+ selector: {}
+ # prometheus: kube-prometheus
+
+ # -- Prometheus ServiceMonitor scheme
+ scheme: ""
+ # -- Prometheus ServiceMonitor tlsConfig
+ tlsConfig: {}
+ # -- Prometheus ServiceMonitor namespace
+ namespace: "" # "monitoring"
+ # -- Prometheus ServiceMonitor labels
+ additionalLabels: {}
+ # -- Prometheus ServiceMonitor annotations
+ annotations: {}
+
+# This key configures Redis-HA subchart and when enabled (redis-ha.enabled=true)
+# the custom redis deployment is omitted
+# Check the redis-ha chart for more properties
+redis-ha:
+ # -- Enables the Redis HA subchart and disables the custom Redis single node deployment
+ enabled: false
+ exporter:
+ # -- If `true`, the prometheus exporter sidecar is enabled
+ enabled: true
+ persistentVolume:
+ # -- Configures persistence on Redis nodes
+ enabled: false
+ redis:
+ # -- Redis convention for naming the cluster group: must match `^[\\w-\\.]+$` and can be templated
+ masterGroupName: argocd
+ # -- Any valid redis config options in this section will be applied to each server (see `redis-ha` chart)
+ # @default -- See [values.yaml]
+ config:
+ # -- Will save the DB if both the given number of seconds and the given number of write operations against the DB occurred. `""` is disabled
+ # @default -- `'""'`
+ save: '""'
+ haproxy:
+ # -- Enables HAProxy LoadBalancing/Proxy
+ enabled: true
+ metrics:
+ # -- Enable Prometheus metric scraping for HAProxy
+ enabled: true
+ image:
+ # -- Redis tag
+ tag: 7.0.5-alpine
+
+ ## https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
+ topologySpreadConstraints:
+ # -- Enable Redis HA topology spread constraints
+ enabled: false
+ # -- Max skew of pods tolerated
+ # @default -- `""` (defaults to `1`)
+ maxSkew: ""
+ # -- Topology key for spread
+ # @default -- `""` (defaults to `topology.kubernetes.io/zone`)
+ topologyKey: ""
+ # -- Enforcement policy, hard or soft
+ # @default -- `""` (defaults to `ScheduleAnyway`)
+ whenUnsatisfiable: ""
+
+# External Redis parameters
+externalRedis:
+ # -- External Redis server host
+ host: ""
+ # -- External Redis username
+ username: ""
+ # -- External Redis password
+ password: ""
+ # -- External Redis server port
+ port: 6379
+ # -- The name of an existing secret with Redis credentials (must contain key `redis-password`).
+ # When it's set, the `externalRedis.password` parameter is ignored
+ existingSecret: ""
+ # -- External Redis Secret annotations
+ secretAnnotations: {}
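+ # Illustrative sketch only: when `externalRedis.existingSecret` is used, the referenced
+ # Secret is expected to carry a `redis-password` key, e.g. (names are placeholders):
+ # kubectl create secret generic my-redis-credentials --from-literal=redis-password=<password>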
+
+## Server
+server:
+ # -- Argo CD server name
+ name: server
+
+ # -- The number of server pods to run
+ replicas: 1
+
+ ## Argo CD server Horizontal Pod Autoscaler
+ autoscaling:
+ # -- Enable Horizontal Pod Autoscaler ([HPA]) for the Argo CD server
+ enabled: false
+ # -- Minimum number of replicas for the Argo CD server [HPA]
+ minReplicas: 1
+ # -- Maximum number of replicas for the Argo CD server [HPA]
+ maxReplicas: 5
+ # -- Average CPU utilization percentage for the Argo CD server [HPA]
+ targetCPUUtilizationPercentage: 50
+ # -- Average memory utilization percentage for the Argo CD server [HPA]
+ targetMemoryUtilizationPercentage: 50
+ # -- Configures the scaling behavior of the target in both Up and Down directions.
+ # This is only available on HPA apiVersion `autoscaling/v2beta2` and newer
+ behavior: {}
+ # scaleDown:
+ # stabilizationWindowSeconds: 300
+ # policies:
+ # - type: Pods
+ # value: 1
+ # periodSeconds: 180
+ # scaleUp:
+ # stabilizationWindowSeconds: 300
+ # policies:
+ # - type: Pods
+ # value: 2
+ # periodSeconds: 60
+
+ ## Argo CD server Pod Disruption Budget
+ ## Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
+ pdb:
+ # -- Deploy a [PodDisruptionBudget] for the Argo CD server
+ enabled: false
+ # -- Labels to be added to Argo CD server pdb
+ labels: {}
+ # -- Annotations to be added to Argo CD server pdb
+ annotations: {}
+ # -- Number of pods that are available after eviction as number or percentage (eg.: 50%)
+ # @default -- `""` (defaults to 0 if not specified)
+ minAvailable: ""
+ # -- Number of pods that are unavailable after eviction as number or percentage (eg.: 50%).
+ ## Has higher precedence over `server.pdb.minAvailable`
+ maxUnavailable: ""
+
+ ## Argo CD server image
+ image:
+ # -- Repository to use for the Argo CD server
+ # @default -- `""` (defaults to global.image.repository)
+ repository: "" # defaults to global.image.repository
+ # -- Tag to use for the Argo CD server
+ # @default -- `""` (defaults to global.image.tag)
+ tag: "" # defaults to global.image.tag
+ # -- Image pull policy for the Argo CD server
+ # @default -- `""` (defaults to global.image.imagePullPolicy)
+ imagePullPolicy: "" # IfNotPresent
+
+ # -- Secrets with credentials to pull images from a private registry
+ # @default -- `[]` (defaults to global.imagePullSecrets)
+ imagePullSecrets: []
+
+ # -- Additional command line arguments to pass to Argo CD server
+ extraArgs: [--insecure]
+
+ # -- Environment variables to pass to Argo CD server
+ env: []
+
+ # -- envFrom to pass to Argo CD server
+ # @default -- `[]` (See [values.yaml])
+ envFrom: []
+ # - configMapRef:
+ # name: config-map-name
+ # - secretRef:
+ # name: secret-name
+
+ # -- Specify postStart and preStop lifecycle hooks for your argo-cd-server container
+ lifecycle: {}
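+ # A purely illustrative sketch (the command and delay are assumptions): give in-flight
+ # connections a moment to drain before shutdown.
+ # preStop:
+ # exec:
+ # command: ["sleep", "5"]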
+
+ ## Argo UI extensions
+ ## This feature is in tech preview stage; expect instability or breaking changes in newer versions.
+ ## Ref: https://github.com/argoproj-labs/argocd-extensions
+ extensions:
+ # -- Enable support for Argo UI extensions
+ enabled: false
+
+ ## Argo UI extensions image
+ image:
+ # -- Repository to use for extensions image
+ repository: "ghcr.io/argoproj-labs/argocd-extensions"
+ # -- Tag to use for extensions image
+ tag: "v0.1.0"
+ # -- Image pull policy for extensions
+ # @default -- `""` (defaults to global.image.imagePullPolicy)
+ imagePullPolicy: ""
+
+ # -- Server UI extensions container-level security context
+ # @default -- See [values.yaml]
+ containerSecurityContext:
+ runAsNonRoot: true
+ readOnlyRootFilesystem: true
+ allowPrivilegeEscalation: false
+ seccompProfile:
+ type: RuntimeDefault
+ capabilities:
+ drop:
+ - ALL
+
+ # -- Resource limits and requests for the argocd-extensions container
+ resources: {}
+ # limits:
+ # cpu: 50m
+ # memory: 128Mi
+ # requests:
+ # cpu: 10m
+ # memory: 64Mi
+
+ # -- Additional containers to be added to the server pod
+ ## See https://github.com/lemonldap-ng-controller/lemonldap-ng-controller as example.
+ extraContainers: []
+ # - name: my-sidecar
+ # image: nginx:latest
+ # - name: lemonldap-ng-controller
+ # image: lemonldapng/lemonldap-ng-controller:0.2.0
+ # args:
+ # - /lemonldap-ng-controller
+ # - --alsologtostderr
+ # - --configmap=$(POD_NAMESPACE)/lemonldap-ng-configuration
+ # env:
+ # - name: POD_NAME
+ # valueFrom:
+ # fieldRef:
+ # fieldPath: metadata.name
+ # - name: POD_NAMESPACE
+ # valueFrom:
+ # fieldRef:
+ # fieldPath: metadata.namespace
+ # volumeMounts:
+ # - name: copy-portal-skins
+ # mountPath: /srv/var/lib/lemonldap-ng/portal/skins
+
+ # -- Init containers to add to the server pod
+ ## If your target Kubernetes cluster(s) require a custom credential (exec) plugin
+ ## you could use this (and the same in the application controller pod) to provide such executable
+ ## Ref: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#client-go-credential-plugins
+ initContainers: []
+ # - name: download-tools
+ # image: alpine:3
+ # command: [sh, -c]
+ # args:
+ # - wget -qO kubelogin.zip https://github.com/Azure/kubelogin/releases/download/v0.0.25/kubelogin-linux-amd64.zip &&
+ # unzip kubelogin.zip && mv bin/linux_amd64/kubelogin /custom-tools/
+ # volumeMounts:
+ # - mountPath: /custom-tools
+ # name: custom-tools
+
+ # -- Additional volumeMounts to the server main container
+ volumeMounts: []
+ # - mountPath: /usr/local/bin/kubelogin
+ # name: custom-tools
+ # subPath: kubelogin
+
+ # -- Additional volumes to the server pod
+ volumes: []
+ # - name: custom-tools
+ # emptyDir: {}
+
+ # -- Annotations to be added to server Deployment
+ deploymentAnnotations: {}
+
+ # -- Annotations to be added to server pods
+ podAnnotations: {}
+
+ # -- Labels to be added to server pods
+ podLabels: {}
+
+ # -- Resource limits and requests for the Argo CD server
+ resources: {}
+ # limits:
+ # cpu: 100m
+ # memory: 128Mi
+ # requests:
+ # cpu: 50m
+ # memory: 64Mi
+
+ # -- Configures the server port
+ containerPort: 8080
+
+ ## Readiness and liveness probes for default backend
+ ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
+ readinessProbe:
+ # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded
+ failureThreshold: 3
+ # -- Number of seconds after the container has started before [probe] is initiated
+ initialDelaySeconds: 10
+ # -- How often (in seconds) to perform the [probe]
+ periodSeconds: 10
+ # -- Minimum consecutive successes for the [probe] to be considered successful after having failed
+ successThreshold: 1
+ # -- Number of seconds after which the [probe] times out
+ timeoutSeconds: 1
+ livenessProbe:
+ # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded
+ failureThreshold: 3
+ # -- Number of seconds after the container has started before [probe] is initiated
+ initialDelaySeconds: 10
+ # -- How often (in seconds) to perform the [probe]
+ periodSeconds: 10
+ # -- Minimum consecutive successes for the [probe] to be considered successful after having failed
+ successThreshold: 1
+ # -- Number of seconds after which the [probe] times out
+ timeoutSeconds: 1
+
+ # -- [Node selector]
+ nodeSelector: {}
+ # -- [Tolerations] for use with node taints
+ tolerations: []
+ # -- Assign custom [affinity] rules to the deployment
+ affinity: {}
+
+ # -- Assign custom [TopologySpreadConstraints] rules to the Argo CD server
+ ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
+ ## If labelSelector is left out, it will default to the labelSelector configuration of the deployment
+ topologySpreadConstraints: []
+ # - maxSkew: 1
+ # topologyKey: topology.kubernetes.io/zone
+ # whenUnsatisfiable: DoNotSchedule
+
+ # -- Priority class for the Argo CD server
+ priorityClassName: ""
+
+ # -- Server container-level security context
+ # @default -- See [values.yaml]
+ containerSecurityContext:
+ runAsNonRoot: true
+ readOnlyRootFilesystem: true
+ allowPrivilegeEscalation: false
+ seccompProfile:
+ type: RuntimeDefault
+ capabilities:
+ drop:
+ - ALL
+
+ # TLS certificate configuration via cert-manager
+ ## Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/tls/#tls-certificates-used-by-argocd-server
+ certificate:
+ # -- Deploy a Certificate resource (requires cert-manager)
+ enabled: false
+ # -- The name of the Secret that will be automatically created and managed by this Certificate resource
+ secretName: argocd-server-tls
+ # -- Certificate primary domain (commonName)
+ domain: argocd.example.com
+ # -- Certificate Subject Alternate Names (SANs)
+ additionalHosts: []
+ # -- The requested 'duration' (i.e. lifetime) of the certificate.
+ # @default -- `""` (defaults to 2160h = 90d if not specified)
+ ## Ref: https://cert-manager.io/docs/usage/certificate/#renewal
+ duration: ""
+ # -- How long before the expiry a certificate should be renewed.
+ # @default -- `""` (defaults to 360h = 15d if not specified)
+ ## Ref: https://cert-manager.io/docs/usage/certificate/#renewal
+ renewBefore: ""
+ # Certificate issuer
+ ## Ref: https://cert-manager.io/docs/concepts/issuer
+ issuer:
+ # -- Certificate issuer group. Set if using an external issuer. Eg. `cert-manager.io`
+ group: ""
+ # -- Certificate issuer kind. Either `Issuer` or `ClusterIssuer`
+ kind: ""
+ # -- Certificate issuer name. Eg. `letsencrypt`
+ name: ""
+ # Private key of the certificate
+ privateKey:
+ # -- Rotation policy of private key when certificate is re-issued. Either: `Never` or `Always`
+ rotationPolicy: Never
+ # -- The private key cryptography standards (PKCS) encoding for the private key. Either: `PKCS1` or `PKCS8`
+ encoding: PKCS1
+ # -- Algorithm used to generate certificate private key. One of: `RSA`, `Ed25519` or `ECDSA`
+ algorithm: RSA
+ # -- Key bit size of the private key. If algorithm is set to `Ed25519`, size is ignored.
+ size: 2048
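+ # Illustrative sketch only (the issuer name is a placeholder, not a chart default):
+ # enabling this block with an existing cert-manager ClusterIssuer might look like
+ # certificate:
+ # enabled: true
+ # domain: argocd.example.com
+ # issuer:
+ # kind: ClusterIssuer
+ # name: letsencrypt-prod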
+
+ # TLS certificate configuration via Secret
+ ## Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/tls/#tls-certificates-used-by-argocd-server
+ certificateSecret:
+ # -- Create argocd-server-tls secret
+ enabled: false
+ # -- Annotations to be added to argocd-server-tls secret
+ annotations: {}
+ # -- Labels to be added to argocd-server-tls secret
+ labels: {}
+ # -- Private Key of the certificate
+ key: ''
+ # -- Certificate data
+ crt: ''
+
+ ## Server service configuration
+ service:
+ # -- Server service annotations
+ annotations: {}
+ # -- Server service labels
+ labels: {}
+ # -- Server service type
+ type: ClusterIP
+ # -- Server service http port for NodePort service type (only if `server.service.type` is set to "NodePort")
+ nodePortHttp: 30080
+ # -- Server service https port for NodePort service type (only if `server.service.type` is set to "NodePort")
+ nodePortHttps: 30443
+ # -- Server service http port
+ servicePortHttp: 80
+ # -- Server service https port
+ servicePortHttps: 443
+ # -- Server service http port name, can be used to route traffic via istio
+ servicePortHttpName: http
+ # -- Server service https port name, can be used to route traffic via istio
+ servicePortHttpsName: https
+ # -- Use named target port for argocd
+ ## Named target ports are not supported by GCE health checks, so when deploying argocd on GKE
+ ## and exposing it via GCE ingress, the health checks fail and the load balancer returns a 502.
+ namedTargetPort: true
+ # -- LoadBalancer will get created with the IP specified in this field
+ loadBalancerIP: ""
+ # -- Source IP ranges to allow access to service from
+ loadBalancerSourceRanges: []
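+ # Illustrative example only (the CIDR is a placeholder):
+ # - 10.0.0.0/8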
+ # -- Server service external IPs
+ externalIPs: []
+ # -- Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
+ externalTrafficPolicy: ""
+ # -- Used to maintain session affinity. Supports `ClientIP` and `None`
+ sessionAffinity: ""
+
+ ## Server metrics service configuration
+ metrics:
+ # -- Deploy metrics service
+ enabled: false
+ service:
+ # -- Metrics service annotations
+ annotations: {}
+ # -- Metrics service labels
+ labels: {}
+ # -- Metrics service port
+ servicePort: 8083
+ # -- Metrics service port name
+ portName: http-metrics
+ serviceMonitor:
+ # -- Enable a prometheus ServiceMonitor
+ enabled: false
+ # -- Prometheus ServiceMonitor interval
+ interval: 30s
+ # -- Prometheus [RelabelConfigs] to apply to samples before scraping
+ relabelings: []
+ # -- Prometheus [MetricRelabelConfigs] to apply to samples before ingestion
+ metricRelabelings: []
+ # -- Prometheus ServiceMonitor selector
+ selector: {}
+ # prometheus: kube-prometheus
+
+ # -- Prometheus ServiceMonitor scheme
+ scheme: ""
+ # -- Prometheus ServiceMonitor tlsConfig
+ tlsConfig: {}
+ # -- Prometheus ServiceMonitor namespace
+ namespace: "" # monitoring
+ # -- Prometheus ServiceMonitor labels
+ additionalLabels: {}
+ # -- Prometheus ServiceMonitor annotations
+ annotations: {}
+
+ serviceAccount:
+ # -- Create server service account
+ create: true
+ # -- Server service account name
+ name: argocd-server
+ # -- Annotations applied to created service account
+ annotations: {}
+ # -- Labels applied to created service account
+ labels: {}
+ # -- Automount API credentials for the Service Account
+ automountServiceAccountToken: true
+
+ ingress:
+ # -- Enable an ingress resource for the Argo CD server
+ enabled: false
+ # -- Additional ingress annotations
+ annotations: {}
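+ # Illustrative example only (assumes the ingress-nginx controller; keys differ for other controllers):
+ # nginx.ingress.kubernetes.io/ssl-passthrough: "true"
+ # nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"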
+ # -- Additional ingress labels
+ labels: {}
+ # -- Defines which ingress controller will implement the resource
+ ingressClassName: ""
+
+ # -- List of ingress hosts
+ ## Argo Ingress.
+ ## Hostnames must be provided if Ingress is enabled.
+ ## Secrets must be manually created in the namespace
+ hosts: []
+ # - argocd.example.com
+
+ # -- List of ingress paths
+ paths:
+ - /
+ # -- Ingress path type. One of `Exact`, `Prefix` or `ImplementationSpecific`
+ pathType: Prefix
+ # -- Additional ingress paths
+ extraPaths: []
+ # - path: /*
+ # pathType: Prefix
+ # backend:
+ # service:
+ # name: ssl-redirect
+ # port:
+ # name: use-annotation
+
+ # -- Ingress TLS configuration
+ tls: []
+ # - secretName: your-certificate-name
+ # hosts:
+ # - argocd.example.com
+
+ # -- Uses `server.service.servicePortHttps` instead of `server.service.servicePortHttp`
+ https: false
+
+ # dedicated ingress for gRPC as documented at
+ # Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/ingress/
+ ingressGrpc:
+ # -- Enable an ingress resource for the Argo CD server for dedicated [gRPC-ingress]
+ enabled: false
+ # -- Set up gRPC ingress to work with an AWS ALB
+ isAWSALB: false
+ # -- Additional ingress annotations for dedicated [gRPC-ingress]
+ annotations: {}
+ # -- Additional ingress labels for dedicated [gRPC-ingress]
+ labels: {}
+ # -- Defines which ingress controller will implement the resource [gRPC-ingress]
+ ingressClassName: ""
+
+ awsALB:
+ # -- Service type for the AWS ALB gRPC service
+ ## Service type if isAWSALB is set to true.
+ ## Can be of type NodePort or ClusterIP depending on which mode you are
+ ## running. Instance mode needs type NodePort, IP mode needs type ClusterIP.
+ ## Ref: https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.2/how-it-works/#ingress-traffic
+ serviceType: NodePort
+ # -- Backend protocol version for the AWS ALB gRPC service
+ ## This tells AWS to send traffic from the ALB using HTTP2. Can use gRPC as well if you want to leverage gRPC specific features
+ backendProtocolVersion: HTTP2
+
+ # -- List of ingress hosts for dedicated [gRPC-ingress]
+ ## Argo Ingress.
+ ## Hostnames must be provided if Ingress is enabled.
+ ## Secrets must be manually created in the namespace
+ ##
+ hosts: []
+ # - argocd.example.com
+
+ # -- List of ingress paths for dedicated [gRPC-ingress]
+ paths:
+ - /
+ # -- Ingress path type for dedicated [gRPC-ingress]. One of `Exact`, `Prefix` or `ImplementationSpecific`
+ pathType: Prefix
+ # -- Additional ingress paths for dedicated [gRPC-ingress]
+ extraPaths: []
+ # - path: /*
+ # pathType: Prefix
+ # backend:
+ # service:
+ # name: ssl-redirect
+ # port:
+ # name: use-annotation
+
+ # -- Ingress TLS configuration for dedicated [gRPC-ingress]
+ tls: []
+ # - secretName: your-certificate-name
+ # hosts:
+ # - argocd.example.com
+
+ # -- Uses `server.service.servicePortHttps` instead of `server.service.servicePortHttp`
+ https: false
+
+ # Create an OpenShift Route with SSL passthrough for UI and CLI
+ # Consider setting 'hostname' e.g. https://argocd.apps-crc.testing/ using your Default Ingress Controller Domain
+ # Find your domain with: kubectl describe --namespace=openshift-ingress-operator ingresscontroller/default | grep Domain:
+ # If 'hostname' is an empty string "" OpenShift will create a hostname for you.
+ route:
+ # -- Enable an OpenShift Route for the Argo CD server
+ enabled: false
+ # -- Openshift Route annotations
+ annotations: {}
+ # -- Hostname of OpenShift Route
+ hostname: ""
+ # -- Termination type of Openshift Route
+ termination_type: passthrough
+ # -- Termination policy of Openshift Route
+ termination_policy: None
+
+ ## Enable Admin ClusterRole resources.
+ ## Enable if you would like to grant rights to Argo CD to deploy to the local Kubernetes cluster.
+ clusterAdminAccess:
+ # -- Enable RBAC for local cluster deployments
+ enabled: true
+
+ GKEbackendConfig:
+ # -- Enable BackendConfig custom resource for Google Kubernetes Engine
+ enabled: false
+ # -- [BackendConfigSpec]
+ spec: {}
+ # spec:
+ # iap:
+ # enabled: true
+ # oauthclientCredentials:
+ # secretName: argocd-secret
+
+ ## Create a Google Managed Certificate for use with the GKE Ingress Controller
+ ## https://cloud.google.com/kubernetes-engine/docs/how-to/managed-certs
+ GKEmanagedCertificate:
+ # -- Enable ManagedCertificate custom resource for Google Kubernetes Engine.
+ enabled: false
+ # -- Domains for the Google Managed Certificate
+ domains:
+ - argocd.example.com
+
+ ## Create a Google FrontendConfig Custom Resource, for use with the GKE Ingress Controller
+ ## https://cloud.google.com/kubernetes-engine/docs/how-to/ingress-features#configuring_ingress_features_through_frontendconfig_parameters
+ GKEfrontendConfig:
+ # -- Enable FrontConfig custom resource for Google Kubernetes Engine
+ enabled: false
+ # -- [FrontendConfigSpec]
+ spec: {}
+ # spec:
+ # redirectToHttps:
+ # enabled: true
+ # responseCodeName: RESPONSE_CODE
+
+## Repo Server
+repoServer:
+ # -- Repo server name
+ name: repo-server
+
+ # -- The number of repo server pods to run
+ replicas: 1
+
+ ## Repo server Horizontal Pod Autoscaler
+ autoscaling:
+ # -- Enable Horizontal Pod Autoscaler ([HPA]) for the repo server
+ enabled: false
+ # -- Minimum number of replicas for the repo server [HPA]
+ minReplicas: 1
+ # -- Maximum number of replicas for the repo server [HPA]
+ maxReplicas: 5
+ # -- Average CPU utilization percentage for the repo server [HPA]
+ targetCPUUtilizationPercentage: 50
+ # -- Average memory utilization percentage for the repo server [HPA]
+ targetMemoryUtilizationPercentage: 50
+ # -- Configures the scaling behavior of the target in both Up and Down directions.
+ # This is only available on HPA apiVersion `autoscaling/v2beta2` and newer
+ behavior: {}
+ # scaleDown:
+ # stabilizationWindowSeconds: 300
+ # policies:
+ # - type: Pods
+ # value: 1
+ # periodSeconds: 180
+ # scaleUp:
+ # stabilizationWindowSeconds: 300
+ # policies:
+ # - type: Pods
+ # value: 2
+ # periodSeconds: 60
+
+ ## Repo server Pod Disruption Budget
+ ## Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
+ pdb:
+ # -- Deploy a [PodDisruptionBudget] for the repo server
+ enabled: false
+ # -- Labels to be added to repo server pdb
+ labels: {}
+ # -- Annotations to be added to repo server pdb
+ annotations: {}
+ # -- Number of pods that are available after eviction as number or percentage (eg.: 50%)
+ # @default -- `""` (defaults to 0 if not specified)
+ minAvailable: ""
+ # -- Number of pods that are unavailable after eviction as number or percentage (eg.: 50%).
+ ## Has higher precedence over `repoServer.pdb.minAvailable`
+ maxUnavailable: ""
+
+ ## Repo server image
+ image:
+ # -- Repository to use for the repo server
+ # @default -- `""` (defaults to global.image.repository)
+ repository: ""
+ # -- Tag to use for the repo server
+ # @default -- `""` (defaults to global.image.tag)
+ tag: ""
+ # -- Image pull policy for the repo server
+ # @default -- `""` (defaults to global.image.imagePullPolicy)
+ imagePullPolicy: ""
+
+ # -- Secrets with credentials to pull images from a private registry
+ # @default -- `[]` (defaults to global.imagePullSecrets)
+ imagePullSecrets: []
+
+ # -- Additional command line arguments to pass to repo server
+ extraArgs: []
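+ # Illustrative example only (flag availability depends on the Argo CD version):
+ # - --parallelismlimit=10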
+
+ # -- Environment variables to pass to repo server
+ env: []
+
+ # -- envFrom to pass to repo server
+ # @default -- `[]` (See [values.yaml])
+ envFrom: []
+ # - configMapRef:
+ # name: config-map-name
+ # - secretRef:
+ # name: secret-name
+
+ # -- Additional containers to be added to the repo server pod
+ ## Ref: https://argo-cd.readthedocs.io/en/stable/user-guide/config-management-plugins/
+ extraContainers: []
+ # - name: cmp
+ # # Entrypoint should be Argo CD lightweight CMP server i.e. argocd-cmp-server
+ # command: [/var/run/argocd/argocd-cmp-server]
+ # image: busybox # This can be off-the-shelf or custom-built image
+ # securityContext:
+ # runAsNonRoot: true
+ # runAsUser: 999
+ # volumeMounts:
+ # - mountPath: /var/run/argocd
+ # name: var-files
+ # - mountPath: /home/argocd/cmp-server/plugins
+ # name: plugins
+ # # Remove this volumeMount if you've chosen to bake the config file into the sidecar image.
+ # - mountPath: /home/argocd/cmp-server/config/plugin.yaml
+ # subPath: plugin.yaml
+ # name: cmp-plugin
+ # # Starting with v2.4, do NOT mount the same tmp volume as the repo-server container. The filesystem separation helps
+ # # mitigate path traversal attacks.
+ # - mountPath: /tmp
+ # name: cmp-tmp
+
+ # -- Init containers to add to the repo server pods
+ initContainers: []
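+ # Illustrative sketch only (tool name and URL are placeholders), mirroring the
+ # commented server.initContainers example above:
+ # - name: download-tools
+ # image: alpine:3
+ # command: [sh, -c]
+ # args:
+ # - wget -qO /custom-tools/my-plugin https://example.com/my-plugin &&
+ # chmod +x /custom-tools/my-plugin
+ # volumeMounts:
+ # - mountPath: /custom-tools
+ # name: custom-tools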
+
+ # -- Additional volumeMounts to the repo server main container
+ volumeMounts: []
+
+ # -- Additional volumes to the repo server pod
+ volumes: []
+ # - name: cmp-plugin
+ # configMap:
+ # name: cmp-plugin
+ # - name: cmp-tmp
+ # emptyDir: {}
+
+ # -- Annotations to be added to repo server Deployment
+ deploymentAnnotations: {}
+
+ # -- Annotations to be added to repo server pods
+ podAnnotations: {}
+
+ # -- Labels to be added to repo server pods
+ podLabels: {}
+
+ # -- Resource limits and requests for the repo server pods
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ ephemeral-storage: 2Gi
+ requests:
+ cpu: 100m
+ memory: 256Mi
+ ephemeral-storage: 2Gi
+
+ # -- Configures the repo server port
+ containerPort: 8081
+
+ ## Readiness and liveness probes for default backend
+ ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
+ readinessProbe:
+ # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded
+ failureThreshold: 3
+ # -- Number of seconds after the container has started before [probe] is initiated
+ initialDelaySeconds: 10
+ # -- How often (in seconds) to perform the [probe]
+ periodSeconds: 10
+ # -- Minimum consecutive successes for the [probe] to be considered successful after having failed
+ successThreshold: 1
+ # -- Number of seconds after which the [probe] times out
+ timeoutSeconds: 1
+ livenessProbe:
+ # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded
+ failureThreshold: 3
+ # -- Number of seconds after the container has started before [probe] is initiated
+ initialDelaySeconds: 10
+ # -- How often (in seconds) to perform the [probe]
+ periodSeconds: 10
+ # -- Minimum consecutive successes for the [probe] to be considered successful after having failed
+ successThreshold: 1
+ # -- Number of seconds after which the [probe] times out
+ timeoutSeconds: 1
+
+ # -- [Node selector]
+ nodeSelector: {}
+ # -- [Tolerations] for use with node taints
+ tolerations: []
+ # -- Assign custom [affinity] rules to the deployment
+ affinity: {}
+
+ # -- Assign custom [TopologySpreadConstraints] rules to the repo server
+ ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
+ ## If labelSelector is left out, it will default to the labelSelector configuration of the deployment
+ topologySpreadConstraints: []
+ # - maxSkew: 1
+ # topologyKey: topology.kubernetes.io/zone
+ # whenUnsatisfiable: DoNotSchedule
+
+ # -- Priority class for the repo server
+ priorityClassName: ""
+
+ # -- Repo server container-level security context
+ # @default -- See [values.yaml]
+ containerSecurityContext:
+ runAsNonRoot: true
+ readOnlyRootFilesystem: true
+ allowPrivilegeEscalation: false
+ seccompProfile:
+ type: RuntimeDefault
+ capabilities:
+ drop:
+ - ALL
+
+ # TLS certificate configuration via Secret
+ ## Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/tls/#configuring-tls-to-argocd-repo-server
+ ## Note: Issuing certificates via cert-manager is not supported right now because it's not possible to restart the repo server automatically without extra controllers.
+ certificateSecret:
+ # -- Create argocd-repo-server-tls secret
+ enabled: false
+ # -- Annotations to be added to argocd-repo-server-tls secret
+ annotations: {}
+ # -- Labels to be added to argocd-repo-server-tls secret
+ labels: {}
+ # -- Certificate authority. Required for self-signed certificates.
+ ca: ''
+ # -- Certificate private key
+ key: ''
+ # -- Certificate data. Must contain SANs of Repo service (ie: argocd-repo-server, argocd-repo-server.argo-cd.svc)
+ crt: ''
+
+ ## Repo server service configuration
+ service:
+ # -- Repo server service annotations
+ annotations: {}
+ # -- Repo server service labels
+ labels: {}
+ # -- Repo server service port
+ port: 8081
+ # -- Repo server service port name
+ portName: https-repo-server
+
+ ## Repo server metrics service configuration
+ metrics:
+ # -- Deploy metrics service
+ enabled: false
+ service:
+ # -- Metrics service annotations
+ annotations: {}
+ # -- Metrics service labels
+ labels: {}
+ # -- Metrics service port
+ servicePort: 8084
+ # -- Metrics service port name
+ portName: http-metrics
+ serviceMonitor:
+ # -- Enable a prometheus ServiceMonitor
+ enabled: false
+ # -- Prometheus ServiceMonitor interval
+ interval: 30s
+ # -- Prometheus [RelabelConfigs] to apply to samples before scraping
+ relabelings: []
+ # -- Prometheus [MetricRelabelConfigs] to apply to samples before ingestion
+ metricRelabelings: []
+ # -- Prometheus ServiceMonitor selector
+ selector: {}
+ # prometheus: kube-prometheus
+
+ # -- Prometheus ServiceMonitor scheme
+ scheme: ""
+ # -- Prometheus ServiceMonitor tlsConfig
+ tlsConfig: {}
+ # -- Prometheus ServiceMonitor namespace
+ namespace: "" # "monitoring"
+ # -- Prometheus ServiceMonitor labels
+ additionalLabels: {}
+ # -- Prometheus ServiceMonitor annotations
+ annotations: {}
+
+ ## Enable Admin ClusterRole resources.
+ ## Enable if you would like to grant cluster rights to Argo CD repo server.
+ clusterAdminAccess:
+ # -- Enable RBAC for local cluster deployments
+ enabled: false
+ ## Enable Custom Rules for the Repo server's Cluster Role resource
+ ## Enable this and set `rules` to whatever custom rules you want for the Cluster Role resource.
+ ## Defaults to off
+ clusterRoleRules:
+ # -- Enable custom rules for the Repo server's Cluster Role resource
+ enabled: false
+ # -- List of custom rules for the Repo server's Cluster Role resource
+ rules: []
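+ # Illustrative example only, mirroring the commented `rbac` sketch further below:
+ # - apiGroups: [""]
+ # resources: ["secrets"]
+ # verbs: ["get", "list", "watch"]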
+
+ ## Repo server service account
+ ## If create is set to true, make sure to uncomment the name and update the rbac section below
+ serviceAccount:
+ # -- Create repo server service account
+ create: true
+ # -- Repo server service account name
+ name: "" # "argocd-repo-server"
+ # -- Annotations applied to created service account
+ annotations: {}
+ # -- Labels applied to created service account
+ labels: {}
+ # -- Automount API credentials for the Service Account
+ automountServiceAccountToken: true
+
+ # -- Repo server rbac rules
+ rbac: []
+ # - apiGroups:
+ # - argoproj.io
+ # resources:
+ # - applications
+ # verbs:
+ # - get
+ # - list
+ # - watch
+
+## ApplicationSet controller
+applicationSet:
+ # -- Enable ApplicationSet controller
+ enabled: true
+
+ # -- ApplicationSet controller name string
+ name: applicationset-controller
+
+ # -- The number of ApplicationSet controller pods to run
+ replicaCount: 1
+
+ ## ApplicationSet controller Pod Disruption Budget
+ ## Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
+ pdb:
+ # -- Deploy a [PodDisruptionBudget] for the ApplicationSet controller
+ enabled: false
+ # -- Labels to be added to ApplicationSet controller pdb
+ labels: {}
+ # -- Annotations to be added to ApplicationSet controller pdb
+ annotations: {}
+ # -- Number of pods that are available after eviction as number or percentage (eg.: 50%)
+ # @default -- `""` (defaults to 0 if not specified)
+ minAvailable: ""
+ # -- Number of pods that are unavailable after eviction as number or percentage (eg.: 50%).
+ ## Has higher precedence over `applicationSet.pdb.minAvailable`
+ maxUnavailable: ""
+
+ ## ApplicationSet controller image
+ image:
+ # -- Repository to use for the ApplicationSet controller
+ # @default -- `""` (defaults to global.image.repository)
+ repository: ""
+ # -- Tag to use for the ApplicationSet controller
+ # @default -- `""` (defaults to global.image.tag)
+ tag: ""
+ # -- Image pull policy for the ApplicationSet controller
+ # @default -- `""` (defaults to global.image.imagePullPolicy)
+ imagePullPolicy: ""
+
+ # -- If defined, uses a Secret to pull an image from a private Docker registry or repository.
+ # @default -- `[]` (defaults to global.imagePullSecrets)
+ imagePullSecrets: []
+
+ # -- ApplicationSet controller log format. Either `text` or `json`
+ # @default -- `""` (defaults to global.logging.format)
+ logFormat: ""
+ # -- ApplicationSet controller log level. One of: `debug`, `info`, `warn`, `error`
+ # @default -- `""` (defaults to global.logging.level)
+ logLevel: ""
+
+ args:
+ # -- The default metric address
+ metricsAddr: :8080
+ # -- The default health check port
+ probeBindAddr: :8081
+ # -- How applications are synced between the generator and the cluster
+ policy: sync
+ # -- Enable dry run mode
+ dryRun: false
+
+ # -- List of extra cli args to add
+ extraArgs: []
+
+ # -- Environment variables to pass to the ApplicationSet controller
+ extraEnv: []
+ # - name: "MY_VAR"
+ # value: "value"
+
+ # -- envFrom to pass to the ApplicationSet controller
+ # @default -- `[]` (See [values.yaml])
+ extraEnvFrom: []
+ # - configMapRef:
+ # name: config-map-name
+ # - secretRef:
+ # name: secret-name
+
+ # -- Additional containers to be added to the ApplicationSet controller pod
+ extraContainers: []
+
+ # -- List of extra mounts to add (normally used with extraVolumes)
+ extraVolumeMounts: []
+
+ # -- List of extra volumes to add
+ extraVolumes: []
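+ # Illustrative example only (the volume name is a placeholder):
+ # - name: tmp
+ # emptyDir: {}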
+
+ ## Metrics service configuration
+ metrics:
+ # -- Deploy metrics service
+ enabled: false
+ service:
+ # -- Metrics service annotations
+ annotations: {}
+ # -- Metrics service labels
+ labels: {}
+ # -- Metrics service port
+ servicePort: 8085
+ # -- Metrics service port name
+ portName: http-metrics
+ serviceMonitor:
+ # -- Enable a prometheus ServiceMonitor
+ enabled: false
+ # -- Prometheus ServiceMonitor interval
+ interval: 30s
+ # -- Prometheus [RelabelConfigs] to apply to samples before scraping
+ relabelings: []
+ # -- Prometheus [MetricRelabelConfigs] to apply to samples before ingestion
+ metricRelabelings: []
+ # -- Prometheus ServiceMonitor selector
+ selector: {}
+ # prometheus: kube-prometheus
+
+ # -- Prometheus ServiceMonitor scheme
+ scheme: ""
+ # -- Prometheus ServiceMonitor tlsConfig
+ tlsConfig: {}
+ # -- Prometheus ServiceMonitor namespace
+ namespace: "" # monitoring
+ # -- Prometheus ServiceMonitor labels
+ additionalLabels: {}
+ # -- Prometheus ServiceMonitor annotations
+ annotations: {}
+
+ ## ApplicationSet service configuration
+ service:
+ # -- ApplicationSet service annotations
+ annotations: {}
+ # -- ApplicationSet service labels
+ labels: {}
+ # -- ApplicationSet service port
+ port: 7000
+ # -- ApplicationSet service port name
+ portName: webhook
+
+ serviceAccount:
+ # -- Specifies whether a service account should be created
+ create: true
+ # -- Annotations to add to the service account
+ annotations: {}
+ # -- Labels applied to created service account
+ labels: {}
+ # -- The name of the service account to use.
+ # If not set and create is true, a name is generated using the fullname template
+ name: ""
+
+ # -- Annotations to be added to ApplicationSet controller Deployment
+ deploymentAnnotations: {}
+
+ # -- Annotations for the ApplicationSet controller pods
+ podAnnotations: {}
+
+ # -- Labels for the ApplicationSet controller pods
+ podLabels: {}
+
+ # -- Resource limits and requests for the ApplicationSet controller pods.
+ resources: {}
+ # limits:
+ # cpu: 100m
+ # memory: 128Mi
+ # requests:
+ # cpu: 100m
+ # memory: 128Mi
+
+ # -- ApplicationSet controller container-level security context
+ # @default -- See [values.yaml]
+ containerSecurityContext:
+ runAsNonRoot: true
+ readOnlyRootFilesystem: true
+ allowPrivilegeEscalation: false
+ seccompProfile:
+ type: RuntimeDefault
+ capabilities:
+ drop:
+ - ALL
+
+ ## Probes for ApplicationSet controller (optional)
+ ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
+ readinessProbe:
+ # -- Enable Kubernetes readiness probe for the ApplicationSet controller
+ enabled: false
+ # -- Number of seconds after the container has started before [probe] is initiated
+ initialDelaySeconds: 10
+ # -- How often (in seconds) to perform the [probe]
+ periodSeconds: 10
+ # -- Number of seconds after which the [probe] times out
+ timeoutSeconds: 1
+ # -- Minimum consecutive successes for the [probe] to be considered successful after having failed
+ successThreshold: 1
+ # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded
+ failureThreshold: 3
+
+ livenessProbe:
+ # -- Enable Kubernetes liveness probe for ApplicationSet controller
+ enabled: false
+ # -- Number of seconds after the container has started before [probe] is initiated
+ initialDelaySeconds: 10
+ # -- How often (in seconds) to perform the [probe]
+ periodSeconds: 10
+ # -- Number of seconds after which the [probe] times out
+ timeoutSeconds: 1
+ # -- Minimum consecutive successes for the [probe] to be considered successful after having failed
+ successThreshold: 1
+ # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded
+ failureThreshold: 3
+
+ # -- [Node selector]
+ nodeSelector: {}
+
+ # -- [Tolerations] for use with node taints
+ tolerations: []
+
+ # -- Assign custom [affinity] rules
+ affinity: {}
+
+ # -- If specified, indicates the pod's priority. If not specified, the pod priority will be the default, or zero if there is no default.
+ priorityClassName: ""
+
+ ## Webhook for the Git Generator
+ ## Ref: https://argocd-applicationset.readthedocs.io/en/master/Generators-Git/#webhook-configuration)
+ webhook:
+ ingress:
+ # -- Enable an ingress resource for Webhooks
+ enabled: false
+ # -- Additional ingress annotations
+ annotations: {}
+ # -- Additional ingress labels
+ labels: {}
+ # -- Defines which ingress ApplicationSet controller will implement the resource
+ ingressClassName: ""
+
+ # -- List of ingress hosts
+ ## Hostnames must be provided if Ingress is enabled.
+ ## Secrets must be manually created in the namespace
+ hosts: []
+ # - argocd-applicationset.example.com
+
+ # -- List of ingress paths
+ paths:
+ - /api/webhook
+ # -- Ingress path type. One of `Exact`, `Prefix` or `ImplementationSpecific`
+ pathType: Prefix
+ # -- Additional ingress paths
+ extraPaths: []
+ # - path: /*
+ # backend:
+ # serviceName: ssl-redirect
+ # servicePort: use-annotation
+ ## for Kubernetes >=1.19 (when "networking.k8s.io/v1" is used)
+ # - path: /*
+ # pathType: Prefix
+ # backend:
+ # service:
+ # name: ssl-redirect
+ # port:
+ # name: use-annotation
+
+ # -- Ingress TLS configuration
+ tls: []
+ # - secretName: argocd-applicationset-tls
+ # hosts:
+ # - argocd-applicationset.example.com
+
+## Notifications controller
+notifications:
+ # -- Enable notifications controller
+ enabled: true
+
+ # -- Notifications controller name string
+ name: notifications-controller
+
+ # -- Argo CD dashboard url; used in place of {{.context.argocdUrl}} in templates
+ argocdUrl:
+
+ ## Notifications controller Pod Disruption Budget
+ ## Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
+ pdb:
+ # -- Deploy a [PodDisruptionBudget] for the notifications controller
+ enabled: false
+ # -- Labels to be added to notifications controller pdb
+ labels: {}
+ # -- Annotations to be added to notifications controller pdb
+ annotations: {}
+ # -- Number of pods that are available after eviction as number or percentage (eg.: 50%)
+ # @default -- `""` (defaults to 0 if not specified)
+ minAvailable: ""
+ # -- Number of pods that are unavailable after eviction as number or percentage (eg.: 50%).
+ ## Has higher precedence over `notifications.pdb.minAvailable`
+ maxUnavailable: ""
+
+ ## Notifications controller image
+ image:
+ # -- Repository to use for the notifications controller
+ # @default -- `""` (defaults to global.image.repository)
+ repository: ""
+ # -- Tag to use for the notifications controller
+ # @default -- `""` (defaults to global.image.tag)
+ tag: ""
+ # -- Image pull policy for the notifications controller
+ # @default -- `""` (defaults to global.image.imagePullPolicy)
+ imagePullPolicy: ""
+
+ # -- Secrets with credentials to pull images from a private registry
+ # @default -- `[]` (defaults to global.imagePullSecrets)
+ imagePullSecrets: []
+
+ # -- Notifications controller log format. Either `text` or `json`
+ # @default -- `""` (defaults to global.logging.format)
+ logFormat: ""
+ # -- Notifications controller log level. One of: `debug`, `info`, `warn`, `error`
+ # @default -- `""` (defaults to global.logging.level)
+ logLevel: ""
+
+ # -- Extra arguments to provide to the notifications controller
+ extraArgs: []
+
+ # -- Additional container environment variables
+ extraEnv: []
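+ # Illustrative example only (name/value are placeholders):
+ # - name: "MY_VAR"
+ # value: "value"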
+
+ # -- envFrom to pass to the notifications controller
+ # @default -- `[]` (See [values.yaml])
+ extraEnvFrom: []
+ # - configMapRef:
+ # name: config-map-name
+ # - secretRef:
+ # name: secret-name
+
+ # -- List of extra mounts to add (normally used with extraVolumes)
+ extraVolumeMounts: []
+
+ # -- List of extra volumes to add
+ extraVolumes: []
+
+ # -- Define user-defined context
+ ## For more information: https://argocd-notifications.readthedocs.io/en/stable/templates/#defining-user-defined-context
+ context: {}
+ # region: east
+ # environmentName: staging
+
+ secret:
+ # -- Whether helm chart creates notifications controller secret
+ create: true
+
+ # -- key:value pairs of annotations to be added to the secret
+ annotations: {}
+
+ # -- Generic key:value pairs to be inserted into the secret
+ ## Can be used for templates, notification services etc. Some examples given below.
+ ## For more information: https://argocd-notifications.readthedocs.io/en/stable/services/overview/
+ items: {}
+ # slack-token:
+ # # For more information: https://argocd-notifications.readthedocs.io/en/stable/services/slack/
+
+ # grafana-apiKey:
+ # # For more information: https://argocd-notifications.readthedocs.io/en/stable/services/grafana/
+
+ # webhooks-github-token:
+
+ # email-username:
+ # email-password:
+ # For more information: https://argocd-notifications.readthedocs.io/en/stable/services/email/
+
+ metrics:
+ # -- Enables prometheus metrics server
+ enabled: false
+ # -- Metrics port
+ port: 9001
+ service:
+ # -- Metrics service annotations
+ annotations: {}
+ # -- Metrics service labels
+ labels: {}
+ # -- Metrics service port name
+ portName: http-metrics
+ serviceMonitor:
+ # -- Enable a prometheus ServiceMonitor
+ enabled: false
+ # -- Prometheus ServiceMonitor selector
+ selector: {}
+ # prometheus: kube-prometheus
+ # -- Prometheus ServiceMonitor labels
+ additionalLabels: {}
+ # -- Prometheus ServiceMonitor annotations
+ annotations: {}
+ # namespace: monitoring
+ # interval: 30s
+ # scrapeTimeout: 10s
+ # -- Prometheus ServiceMonitor scheme
+ scheme: ""
+ # -- Prometheus ServiceMonitor tlsConfig
+ tlsConfig: {}
+ # -- Prometheus [RelabelConfigs] to apply to samples before scraping
+ relabelings: []
+ # -- Prometheus [MetricRelabelConfigs] to apply to samples before ingestion
+ metricRelabelings: []
+
+ # -- Configures notification services such as slack, email or custom webhook
+ # @default -- See [values.yaml]
+ ## For more information: https://argocd-notifications.readthedocs.io/en/stable/services/overview/
+ notifiers: {}
+ # service.slack: |
+ # token: $slack-token
+
+ # -- Annotations to be applied to the notifications controller Deployment
+ deploymentAnnotations: {}
+
+ # -- Annotations to be applied to the notifications controller Pods
+ podAnnotations: {}
+
+ # -- Labels to be applied to the notifications controller Pods
+ podLabels: {}
+
+ # -- Resource limits and requests for the notifications controller
+ resources: {}
+ # limits:
+ # cpu: 100m
+ # memory: 128Mi
+ # requests:
+ # cpu: 100m
+ # memory: 128Mi
+
+ # -- Notification controller container-level security Context
+ # @default -- See [values.yaml]
+ containerSecurityContext:
+ runAsNonRoot: true
+ readOnlyRootFilesystem: true
+ allowPrivilegeEscalation: false
+ seccompProfile:
+ type: RuntimeDefault
+ capabilities:
+ drop:
+ - ALL
+
+ # -- [Node selector]
+ nodeSelector: {}
+
+ # -- [Tolerations] for use with node taints
+ tolerations: []
+
+ # -- Assign custom [affinity] rules
+ affinity: {}
+
+ # -- Priority class for the notifications controller pods
+ priorityClassName: ""
+
+ serviceAccount:
+ # -- Specifies whether a service account should be created
+ create: true
+
+ # -- The name of the service account to use.
+ ## If not set and create is true, a name is generated using the fullname template
+ name: argocd-notifications-controller
+
+ # -- Annotations applied to created service account
+ annotations: {}
+
+ # -- Labels applied to created service account
+ labels: {}
+ cm:
+ # -- Whether helm chart creates notifications controller config map
+ create: true
+
+ # -- Contains centrally managed global application subscriptions
+ ## For more information: https://argocd-notifications.readthedocs.io/en/stable/subscriptions/
+ subscriptions: []
+ # # subscription for on-sync-status-unknown trigger notifications
+ # - recipients:
+ # - slack:test2
+ # - email:test@gmail.com
+ # triggers:
+ # - on-sync-status-unknown
+ # # subscription restricted to applications with matching labels only
+ # - recipients:
+ # - slack:test3
+ # selector: test=true
+ # triggers:
+ # - on-sync-status-unknown
+
+ # -- The notification template is used to generate the notification content
+ ## For more information: https://argocd-notifications.readthedocs.io/en/stable/templates/
+ templates: {}
+ # template.app-deployed: |
+ # email:
+ # subject: New version of an application {{.app.metadata.name}} is up and running.
+ # message: |
+ # {{if eq .serviceType "slack"}}:white_check_mark:{{end}} Application {{.app.metadata.name}} is now running new version of deployments manifests.
+ # slack:
+ # attachments: |
+ # [{
+ # "title": "{{ .app.metadata.name}}",
+ # "title_link":"{{.context.argocdUrl}}/applications/{{.app.metadata.name}}",
+ # "color": "#18be52",
+ # "fields": [
+ # {
+ # "title": "Sync Status",
+ # "value": "{{.app.status.sync.status}}",
+ # "short": true
+ # },
+ # {
+ # "title": "Repository",
+ # "value": "{{.app.spec.source.repoURL}}",
+ # "short": true
+ # },
+ # {
+ # "title": "Revision",
+ # "value": "{{.app.status.sync.revision}}",
+ # "short": true
+ # }
+ # {{range $index, $c := .app.status.conditions}}
+ # {{if not $index}},{{end}}
+ # {{if $index}},{{end}}
+ # {
+ # "title": "{{$c.type}}",
+ # "value": "{{$c.message}}",
+ # "short": true
+ # }
+ # {{end}}
+ # ]
+ # }]
+ # template.app-health-degraded: |
+ # email:
+ # subject: Application {{.app.metadata.name}} has degraded.
+ # message: |
+ # {{if eq .serviceType "slack"}}:exclamation:{{end}} Application {{.app.metadata.name}} has degraded.
+ # Application details: {{.context.argocdUrl}}/applications/{{.app.metadata.name}}.
+ # slack:
+ # attachments: |-
+ # [{
+ # "title": "{{ .app.metadata.name}}",
+ # "title_link": "{{.context.argocdUrl}}/applications/{{.app.metadata.name}}",
+ # "color": "#f4c030",
+ # "fields": [
+ # {
+ # "title": "Sync Status",
+ # "value": "{{.app.status.sync.status}}",
+ # "short": true
+ # },
+ # {
+ # "title": "Repository",
+ # "value": "{{.app.spec.source.repoURL}}",
+ # "short": true
+ # }
+ # {{range $index, $c := .app.status.conditions}}
+ # {{if not $index}},{{end}}
+ # {{if $index}},{{end}}
+ # {
+ # "title": "{{$c.type}}",
+ # "value": "{{$c.message}}",
+ # "short": true
+ # }
+ # {{end}}
+ # ]
+ # }]
+ # template.app-sync-failed: |
+ # email:
+ # subject: Failed to sync application {{.app.metadata.name}}.
+ # message: |
+ # {{if eq .serviceType "slack"}}:exclamation:{{end}} The sync operation of application {{.app.metadata.name}} has failed at {{.app.status.operationState.finishedAt}} with the following error: {{.app.status.operationState.message}}
+ # Sync operation details are available at: {{.context.argocdUrl}}/applications/{{.app.metadata.name}}?operation=true .
+ # slack:
+ # attachments: |-
+ # [{
+ # "title": "{{ .app.metadata.name}}",
+ # "title_link":"{{.context.argocdUrl}}/applications/{{.app.metadata.name}}",
+ # "color": "#E96D76",
+ # "fields": [
+ # {
+ # "title": "Sync Status",
+ # "value": "{{.app.status.sync.status}}",
+ # "short": true
+ # },
+ # {
+ # "title": "Repository",
+ # "value": "{{.app.spec.source.repoURL}}",
+ # "short": true
+ # }
+ # {{range $index, $c := .app.status.conditions}}
+ # {{if not $index}},{{end}}
+ # {{if $index}},{{end}}
+ # {
+ # "title": "{{$c.type}}",
+ # "value": "{{$c.message}}",
+ # "short": true
+ # }
+ # {{end}}
+ # ]
+ # }]
+ # template.app-sync-running: |
+ # email:
+ # subject: Start syncing application {{.app.metadata.name}}.
+ # message: |
+ # The sync operation of application {{.app.metadata.name}} has started at {{.app.status.operationState.startedAt}}.
+ # Sync operation details are available at: {{.context.argocdUrl}}/applications/{{.app.metadata.name}}?operation=true .
+ # slack:
+ # attachments: |-
+ # [{
+ # "title": "{{ .app.metadata.name}}",
+ # "title_link":"{{.context.argocdUrl}}/applications/{{.app.metadata.name}}",
+ # "color": "#0DADEA",
+ # "fields": [
+ # {
+ # "title": "Sync Status",
+ # "value": "{{.app.status.sync.status}}",
+ # "short": true
+ # },
+ # {
+ # "title": "Repository",
+ # "value": "{{.app.spec.source.repoURL}}",
+ # "short": true
+ # }
+ # {{range $index, $c := .app.status.conditions}}
+ # {{if not $index}},{{end}}
+ # {{if $index}},{{end}}
+ # {
+ # "title": "{{$c.type}}",
+ # "value": "{{$c.message}}",
+ # "short": true
+ # }
+ # {{end}}
+ # ]
+ # }]
+ # template.app-sync-status-unknown: |
+ # email:
+ # subject: Application {{.app.metadata.name}} sync status is 'Unknown'
+ # message: |
+ # {{if eq .serviceType "slack"}}:exclamation:{{end}} Application {{.app.metadata.name}} sync is 'Unknown'.
+ # Application details: {{.context.argocdUrl}}/applications/{{.app.metadata.name}}.
+ # {{if ne .serviceType "slack"}}
+ # {{range $c := .app.status.conditions}}
+ # * {{$c.message}}
+ # {{end}}
+ # {{end}}
+ # slack:
+ # attachments: |-
+ # [{
+ # "title": "{{ .app.metadata.name}}",
+ # "title_link":"{{.context.argocdUrl}}/applications/{{.app.metadata.name}}",
+ # "color": "#E96D76",
+ # "fields": [
+ # {
+ # "title": "Sync Status",
+ # "value": "{{.app.status.sync.status}}",
+ # "short": true
+ # },
+ # {
+ # "title": "Repository",
+ # "value": "{{.app.spec.source.repoURL}}",
+ # "short": true
+ # }
+ # {{range $index, $c := .app.status.conditions}}
+ # {{if not $index}},{{end}}
+ # {{if $index}},{{end}}
+ # {
+ # "title": "{{$c.type}}",
+ # "value": "{{$c.message}}",
+ # "short": true
+ # }
+ # {{end}}
+ # ]
+ # }]
+ # template.app-sync-succeeded: |
+ # email:
+ # subject: Application {{.app.metadata.name}} has been successfully synced.
+ # message: |
+ # {{if eq .serviceType "slack"}}:white_check_mark:{{end}} Application {{.app.metadata.name}} has been successfully synced at {{.app.status.operationState.finishedAt}}.
+ # Sync operation details are available at: {{.context.argocdUrl}}/applications/{{.app.metadata.name}}?operation=true .
+ # slack:
+ # attachments: |-
+ # [{
+ # "title": "{{ .app.metadata.name}}",
+ # "title_link":"{{.context.argocdUrl}}/applications/{{.app.metadata.name}}",
+ # "color": "#18be52",
+ # "fields": [
+ # {
+ # "title": "Sync Status",
+ # "value": "{{.app.status.sync.status}}",
+ # "short": true
+ # },
+ # {
+ # "title": "Repository",
+ # "value": "{{.app.spec.source.repoURL}}",
+ # "short": true
+ # }
+ # {{range $index, $c := .app.status.conditions}}
+ # {{if not $index}},{{end}}
+ # {{if $index}},{{end}}
+ # {
+ # "title": "{{$c.type}}",
+ # "value": "{{$c.message}}",
+ # "short": true
+ # }
+ # {{end}}
+ # ]
+ # }]
+
+ # -- The trigger defines the condition when the notification should be sent
+ ## For more information: https://argocd-notifications.readthedocs.io/en/stable/triggers/
+ triggers: {}
+ # trigger.on-deployed: |
+ # - description: Application is synced and healthy. Triggered once per commit.
+ # oncePer: app.status.sync.revision
+ # send:
+ # - app-deployed
+ # when: app.status.operationState.phase in ['Succeeded'] and app.status.health.status == 'Healthy'
+ # trigger.on-health-degraded: |
+ # - description: Application has degraded
+ # send:
+ # - app-health-degraded
+ # when: app.status.health.status == 'Degraded'
+ # trigger.on-sync-failed: |
+ # - description: Application syncing has failed
+ # send:
+ # - app-sync-failed
+ # when: app.status.operationState.phase in ['Error', 'Failed']
+ # trigger.on-sync-running: |
+ # - description: Application is being synced
+ # send:
+ # - app-sync-running
+ # when: app.status.operationState.phase in ['Running']
+ # trigger.on-sync-status-unknown: |
+ # - description: Application status is 'Unknown'
+ # send:
+ # - app-sync-status-unknown
+ # when: app.status.sync.status == 'Unknown'
+ # trigger.on-sync-succeeded: |
+ # - description: Application syncing has succeeded
+ # send:
+ # - app-sync-succeeded
+ # when: app.status.operationState.phase in ['Succeeded']
+ #
+ # For more information: https://argocd-notifications.readthedocs.io/en/stable/triggers/#default-triggers
+ # defaultTriggers: |
+ # - on-sync-status-unknown
+
+ ## The optional bot component simplifies managing subscriptions
+ ## For more information: https://argocd-notifications.readthedocs.io/en/stable/bots/overview/
+ bots:
+ slack:
+ # -- Enable slack bot
+ ## You have to set secret.notifiers.slack.signingSecret
+ enabled: false
+
+ ## Slack bot Pod Disruption Budget
+ ## Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
+ pdb:
+ # -- Deploy a [PodDisruptionBudget] for the Slack bot
+ enabled: false
+ # -- Labels to be added to Slack bot pdb
+ labels: {}
+ # -- Annotations to be added to Slack bot pdb
+ annotations: {}
+ # -- Number of pods that are available after eviction as number or percentage (eg.: 50%)
+ # @default -- `""` (defaults to 0 if not specified)
+ minAvailable: ""
+ # -- Number of pods that are unavailable after eviction as number or percentage (eg.: 50%).
+ ## Has higher precedence over `notifications.bots.slack.pdb.minAvailable`
+ maxUnavailable: ""
+
+ ## Slack bot image
+ image:
+ # -- Repository to use for the Slack bot
+ # @default -- `""` (defaults to global.image.repository)
+ repository: ""
+ # -- Tag to use for the Slack bot
+ # @default -- `""` (defaults to global.image.tag)
+ tag: ""
+ # -- Image pull policy for the Slack bot
+ # @default -- `""` (defaults to global.image.imagePullPolicy)
+ imagePullPolicy: ""
+
+ # -- Secrets with credentials to pull images from a private registry
+ # @default -- `[]` (defaults to global.imagePullSecrets)
+ imagePullSecrets: []
+
+ service:
+ # -- Service annotations for Slack bot
+ annotations: {}
+ # -- Service port for Slack bot
+ port: 80
+ # -- Service type for Slack bot
+ type: LoadBalancer
+
+ serviceAccount:
+ # -- Specifies whether a service account should be created
+ create: true
+
+ # -- The name of the service account to use.
+ ## If not set and create is true, a name is generated using the fullname template
+ name: argocd-notifications-bot
+
+ # -- Annotations applied to created service account
+ annotations: {}
+
+ # -- Slack bot container-level security Context
+ # @default -- See [values.yaml]
+ containerSecurityContext:
+ runAsNonRoot: true
+ readOnlyRootFilesystem: true
+ allowPrivilegeEscalation: false
+ seccompProfile:
+ type: RuntimeDefault
+ capabilities:
+ drop:
+ - ALL
+
+ # -- Resource limits and requests for the Slack bot
+ resources: {}
+ # limits:
+ # cpu: 100m
+ # memory: 128Mi
+ # requests:
+ # cpu: 100m
+ # memory: 128Mi
+
+ # -- Assign custom [affinity] rules
+ affinity: {}
+
+ # -- [Tolerations] for use with node taints
+ tolerations: []
+
+ # -- [Node selector]
+ nodeSelector: {}
\ No newline at end of file
diff --git a/kube/services/arranger-dashboard/arranger-dashboard-deploy.yaml b/kube/services/arranger-dashboard/arranger-dashboard-deploy.yaml
index ae9a26a43..8707a79d5 100644
--- a/kube/services/arranger-dashboard/arranger-dashboard-deploy.yaml
+++ b/kube/services/arranger-dashboard/arranger-dashboard-deploy.yaml
@@ -22,7 +22,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -31,6 +31,22 @@ spec:
values:
- arranger
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - spot
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - SPOT
automountServiceAccountToken: false
containers:
- name: arranger-dashboard
diff --git a/kube/services/arranger/arranger-deploy.yaml b/kube/services/arranger/arranger-deploy.yaml
index 57e19ae29..31d715d7c 100644
--- a/kube/services/arranger/arranger-deploy.yaml
+++ b/kube/services/arranger/arranger-deploy.yaml
@@ -23,7 +23,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -32,6 +32,22 @@ spec:
values:
- arranger
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - spot
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - SPOT
automountServiceAccountToken: false
volumes:
- name: arranger-config
diff --git a/kube/services/audit-service/audit-service-deploy.yaml b/kube/services/audit-service/audit-service-deploy.yaml
index 78e7d6df1..935cab408 100644
--- a/kube/services/audit-service/audit-service-deploy.yaml
+++ b/kube/services/audit-service/audit-service-deploy.yaml
@@ -32,7 +32,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -41,6 +41,22 @@ spec:
values:
- audit-service
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
automountServiceAccountToken: false
volumes:
- name: config-volume
@@ -79,11 +95,10 @@ spec:
subPath: "audit-service-config.yaml"
resources:
requests:
- cpu: 0.4
- memory: 512Mi
+ cpu: 100m
+ memory: 100Mi
limits:
- cpu: 0.8
- memory: 1024Mi
+ memory: 512Mi
initContainers:
- name: audit-db-migrate
GEN3_AUDIT-SERVICE_IMAGE
diff --git a/kube/services/auspice/auspice-deploy.yaml b/kube/services/auspice/auspice-deploy.yaml
index 88324fec4..ce228be9f 100644
--- a/kube/services/auspice/auspice-deploy.yaml
+++ b/kube/services/auspice/auspice-deploy.yaml
@@ -23,7 +23,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -32,6 +32,22 @@ spec:
values:
- auspice
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
automountServiceAccountToken: false
containers:
- name: auspice
@@ -64,8 +80,7 @@ spec:
imagePullPolicy: Always
resources:
requests:
- cpu: 0.5
- memory: 1024Mi
+ cpu: 100m
+ memory: 128Mi
limits:
- cpu: 1
- memory: 2400Mi
+ memory: 1024Mi
diff --git a/kube/services/autoscaler/cluster-autoscaler-autodiscover.yaml b/kube/services/autoscaler/cluster-autoscaler-autodiscover.yaml
index c863a67f7..e99e3fd15 100644
--- a/kube/services/autoscaler/cluster-autoscaler-autodiscover.yaml
+++ b/kube/services/autoscaler/cluster-autoscaler-autodiscover.yaml
@@ -35,6 +35,7 @@ rules:
- apiGroups: [""]
resources:
- "pods"
+ - "namespaces"
- "services"
- "replicationcontrollers"
- "persistentvolumeclaims"
@@ -152,7 +153,6 @@ spec:
name: cluster-autoscaler
resources:
limits:
- cpu: 1000m
memory: 1600Mi
requests:
cpu: 100m
diff --git a/kube/services/aws-es-proxy/aws-es-proxy-deploy.yaml b/kube/services/aws-es-proxy/aws-es-proxy-deploy.yaml
index a2a3170d5..34f18d973 100644
--- a/kube/services/aws-es-proxy/aws-es-proxy-deploy.yaml
+++ b/kube/services/aws-es-proxy/aws-es-proxy-deploy.yaml
@@ -22,11 +22,29 @@ spec:
netvpc: "yes"
GEN3_DATE_LABEL
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
automountServiceAccountToken: false
volumes:
- name: credentials
secret:
secretName: "aws-es-proxy"
+ priorityClassName: aws-es-proxy-high-priority
containers:
- name: esproxy
GEN3_AWS-ES-PROXY_IMAGE|-image: quay.io/cdis/aws-es-proxy:0.8-|
@@ -67,5 +85,5 @@ spec:
cpu: 250m
memory: 256Mi
limits:
- cpu: 1
+ cpu: 1000m
memory: 2Gi
diff --git a/kube/services/aws-es-proxy/aws-es-proxy-priority-class.yaml b/kube/services/aws-es-proxy/aws-es-proxy-priority-class.yaml
new file mode 100644
index 000000000..6bd619a22
--- /dev/null
+++ b/kube/services/aws-es-proxy/aws-es-proxy-priority-class.yaml
@@ -0,0 +1,7 @@
+apiVersion: scheduling.k8s.io/v1
+kind: PriorityClass
+metadata:
+ name: aws-es-proxy-high-priority
+value: 1000000
+globalDefault: false
+description: "Priority class for aws-es-proxy service"
diff --git a/kube/services/cedar-wrapper/cedar-wrapper-deploy.yaml b/kube/services/cedar-wrapper/cedar-wrapper-deploy.yaml
index 954bc5f06..fa6b741a2 100644
--- a/kube/services/cedar-wrapper/cedar-wrapper-deploy.yaml
+++ b/kube/services/cedar-wrapper/cedar-wrapper-deploy.yaml
@@ -24,7 +24,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -33,6 +33,22 @@ spec:
values:
- cedar-wrapper
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - spot
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - SPOT
automountServiceAccountToken: false
volumes:
- name: ca-volume
@@ -64,10 +80,9 @@ spec:
failureThreshold: 6
resources:
requests:
- cpu: 0.6
- memory: 512Mi
+ cpu: 100m
+ memory: 64Mi
limits:
- cpu: 2
memory: 4096Mi
ports:
- containerPort: 8000
diff --git a/kube/services/cogwheel/cogwheel-deploy.yaml b/kube/services/cogwheel/cogwheel-deploy.yaml
index ef274220a..c66f4d3b3 100644
--- a/kube/services/cogwheel/cogwheel-deploy.yaml
+++ b/kube/services/cogwheel/cogwheel-deploy.yaml
@@ -12,6 +12,23 @@ spec:
app: cogwheel
GEN3_DATE_LABEL
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
volumes:
- name: cogwheel-g3auto
secret:
diff --git a/kube/services/cohort-middleware/cohort-middleware-deploy.yaml b/kube/services/cohort-middleware/cohort-middleware-deploy.yaml
index e301856e5..c7c411f4c 100644
--- a/kube/services/cohort-middleware/cohort-middleware-deploy.yaml
+++ b/kube/services/cohort-middleware/cohort-middleware-deploy.yaml
@@ -30,7 +30,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -39,11 +39,28 @@ spec:
values:
- cohort-middleware
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - spot
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - SPOT
automountServiceAccountToken: false
volumes:
- - name: cohort-middleware-config
+ - name: cohort-middleware-g3auto
secret:
- secretName: cohort-middleware-config
+ secretName: cohort-middleware-g3auto
+ optional: true
containers:
- name: cohort-middleware
GEN3_COHORT-MIDDLEWARE_IMAGE|-image: quay.io/cdis/cohort-middleware:latest-|
@@ -94,15 +111,14 @@ spec:
ports:
- containerPort: 8080
volumeMounts:
- - name: cohort-middleware-config
+ - name: cohort-middleware-g3auto
readOnly: true
mountPath: /config/development.yaml
subPath: development.yaml
imagePullPolicy: Always
resources:
requests:
- cpu: 500m
- memory: 4Gi
+ cpu: 100m
+ memory: 128Mi
limits:
- cpu: 500m
memory: 4Gi
diff --git a/kube/services/dashboard/dashboard-deploy.yaml b/kube/services/dashboard/dashboard-deploy.yaml
index 14a3379cc..451d99552 100644
--- a/kube/services/dashboard/dashboard-deploy.yaml
+++ b/kube/services/dashboard/dashboard-deploy.yaml
@@ -29,7 +29,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -38,6 +38,22 @@ spec:
values:
- dashboard
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - spot
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - SPOT
automountServiceAccountToken: false
volumes:
- name: config-volume
@@ -62,10 +78,9 @@ spec:
mountPath: "/etc/gen3"
resources:
requests:
- cpu: 0.3
- memory: 200Mi
+ cpu: 100m
+ memory: 20Mi
limits:
- cpu: 0.5
memory: 500Mi
imagePullPolicy: Always
livenessProbe:
diff --git a/kube/services/datadog/datadog-application.yaml b/kube/services/datadog/datadog-application.yaml
new file mode 100644
index 000000000..19e0e1d86
--- /dev/null
+++ b/kube/services/datadog/datadog-application.yaml
@@ -0,0 +1,27 @@
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+ name: datadog-application
+ namespace: argocd
+spec:
+ project: default
+ sources:
+ - chart: datadog
+ repoURL: 'https://helm.datadoghq.com'
+ targetRevision: 3.6.4
+ helm:
+ valueFiles:
+ - $values/kube/services/datadog/values.yaml
+ releaseName: datadog
+ - repoURL: 'https://github.com/uc-cdis/cloud-automation.git'
+ targetRevision: master
+ ref: values
+ destination:
+ server: 'https://kubernetes.default.svc'
+ namespace: datadog
+ syncPolicy:
+ automated:
+ prune: true
+ selfHeal: true
+ syncOptions:
+ - CreateNamespace=true
diff --git a/kube/services/datadog/datadog_db_user.json b/kube/services/datadog/datadog_db_user.json
new file mode 100644
index 000000000..0eca1be9f
--- /dev/null
+++ b/kube/services/datadog/datadog_db_user.json
@@ -0,0 +1,4 @@
+{
+ "datadog_db_user": "datadog",
+ "datadog_db_password": null
+}
\ No newline at end of file
diff --git a/kube/services/datadog/postgres.yaml b/kube/services/datadog/postgres.yaml
new file mode 100644
index 000000000..f85dc0970
--- /dev/null
+++ b/kube/services/datadog/postgres.yaml
@@ -0,0 +1,8 @@
+cluster_check: true
+init_config:
+instances:
+ - dbm: true
+ host:
+ port: 5432
+ username: datadog
+ password:
\ No newline at end of file
diff --git a/kube/services/datadog/values.yaml b/kube/services/datadog/values.yaml
index 95ec57239..fc0bbab8b 100644
--- a/kube/services/datadog/values.yaml
+++ b/kube/services/datadog/values.yaml
@@ -10,14 +10,28 @@ datadog:
useHostPort: true
nonLocalTraffic: true
+ # This is used to configure checks that Datadog runs. Normally we would annotate a service, but since we
+ # use Aurora, we have to configure the Postgres check from confd instead.
+
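+ # As an illustrative, commented-out sketch (assuming the chart's `confd` key), the
+ # check config kept in kube/services/datadog/postgres.yaml could be supplied to the
+ # agent like so; the host and password are placeholders filled in by automation:
+ # confd:
+ #   postgres.yaml: |-
+ #     cluster_check: true
+ #     init_config:
+ #     instances:
+ #       - dbm: true
+ #         host: <aurora endpoint>
+ #         port: 5432
+ #         username: datadog
+ #         password: <from datadog_db_user.json>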
#Enables Optional Universal Service Monitoring
## ref: https://docs.datadoghq.com/tracing/universal_service_monitoring/?tab=helm
serviceMonitoring:
- enabled: true
+ enabled: false
# datadog.apiKeyExistingSecret -- Use existing Secret which stores API key instead of creating a new one. The value should be set with the `api-key` key inside the secret.
## If set, this parameter takes precedence over "apiKey".
- apiKeyExistingSecret: "datadog-agent"
+ apiKeyExistingSecret: "ddgov-apikey"
+
+ # datadog.site -- The site of the Datadog intake to send Agent data to.
+ # (documentation: https://docs.datadoghq.com/getting_started/site/)
+
+ ## Set to 'datadoghq.com' to send data to the US1 site (default).
+ ## Set to 'datadoghq.eu' to send data to the EU site.
+ ## Set to 'us3.datadoghq.com' to send data to the US3 site.
+ ## Set to 'us5.datadoghq.com' to send data to the US5 site.
+ ## Set to 'ddog-gov.com' to send data to the US1-FED site.
+ ## Set to 'ap1.datadoghq.com' to send data to the AP1 site.
+ site: ddog-gov.com
# datadog.kubeStateMetricsEnabled -- If true, deploys the kube-state-metrics deployment
## ref: https://github.com/kubernetes/kube-state-metrics/tree/kube-state-metrics-helm-chart-2.13.2/charts/kube-state-metrics
@@ -56,11 +70,13 @@ datadog:
apm:
# datadog.apm.socketEnabled -- Enable APM over Socket (Unix Socket or windows named pipe)
## ref: https://docs.datadoghq.com/agent/kubernetes/apm/
- socketEnabled: true
+ socketEnabled: false
# datadog.apm.portEnabled -- Enable APM over TCP communication (port 8126 by default)
## ref: https://docs.datadoghq.com/agent/kubernetes/apm/
- portEnabled: true
+ portEnabled: false
+
+ enabled: false
# datadog.apm.port -- Override the trace Agent port
## Note: Make sure your client is sending to the same UDP port.
@@ -77,18 +93,25 @@ datadog:
# datadog.processAgent.processCollection -- Set this to true to enable process collection in process monitoring agent
## Requires processAgent.enabled to be set to true to have any effect
- processCollection: true
+ processCollection: false
# datadog.processAgent.stripProcessArguments -- Set this to scrub all arguments from collected processes
## Requires processAgent.enabled and processAgent.processCollection to be set to true to have any effect
## ref: https://docs.datadoghq.com/infrastructure/process/?tab=linuxwindows#process-arguments-scrubbing
- stripProcessArguments: true
+ stripProcessArguments: false
# datadog.processAgent.processDiscovery -- Enables or disables autodiscovery of integrations
- processDiscovery: true
+ processDiscovery: false
## Enable systemProbe agent and provide custom configs
systemProbe:
+ resources:
+ requests:
+ cpu: 100m
+ memory: 200Mi
+ limits:
+ cpu: 100m
+ memory: 200Mi
# datadog.systemProbe.debugPort -- Specify the port to expose pprof and expvar for system-probe agent
debugPort: 0
@@ -161,7 +184,7 @@ datadog:
networkMonitoring:
# datadog.networkMonitoring.enabled -- Enable network performance monitoring
- enabled: true
+ enabled: false
## Enable security agent and provide custom configs
@@ -211,7 +234,8 @@ datadog:
# - send_distribution_buckets: true
# timeout: 5
-
+ containerExcludeLogs: "kube_namespace:logging kube_namespace:argo name:pelican-export* name:job-task"
+ containerExclude: "kube_namespace:logging kube_namespace:kube-system kube_namespace:kubecost kube_namespace:argo kube_namespace:cortex-xdr kube_namespace:monitoring kube_namespace:datadog"
## This is the Datadog Cluster Agent implementation that handles cluster-wide
## metrics more cleanly, separates concerns for better rbac, and implements
## the external metrics API so you can autoscale HPAs based on datadog metrics
@@ -266,10 +290,6 @@ agents:
# agents.tolerations -- Allow the DaemonSet to schedule on tainted nodes (requires Kubernetes >= 1.6)
tolerations:
- - effect: NoSchedule
- key: role
- operator: Equal
- value: workflow
- effect: NoSchedule
key: role
operator: Equal
@@ -320,4 +340,3 @@ agents:
# agents.rbac.serviceAccountAnnotations -- Annotations to add to the ServiceAccount if agents.rbac.create is true
serviceAccountAnnotations: {}
-
diff --git a/kube/services/datasim/datasim-deploy.yaml b/kube/services/datasim/datasim-deploy.yaml
index c48075b89..0f6f21d68 100644
--- a/kube/services/datasim/datasim-deploy.yaml
+++ b/kube/services/datasim/datasim-deploy.yaml
@@ -22,7 +22,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -31,6 +31,22 @@ spec:
values:
- datasim
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
automountServiceAccountToken: false
volumes:
- name: yaml-merge
diff --git a/kube/services/dicom-server/dicom-server-deploy.yaml b/kube/services/dicom-server/dicom-server-deploy.yaml
index b2ef0834e..43bd90e5d 100644
--- a/kube/services/dicom-server/dicom-server-deploy.yaml
+++ b/kube/services/dicom-server/dicom-server-deploy.yaml
@@ -17,6 +17,23 @@ spec:
public: "yes"
GEN3_DATE_LABEL
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - spot
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - SPOT
volumes:
- name: config-volume-g3auto
secret:
diff --git a/kube/services/dicom-viewer/dicom-viewer-deploy.yaml b/kube/services/dicom-viewer/dicom-viewer-deploy.yaml
index d1fb8ce55..9df6fbc93 100644
--- a/kube/services/dicom-viewer/dicom-viewer-deploy.yaml
+++ b/kube/services/dicom-viewer/dicom-viewer-deploy.yaml
@@ -17,6 +17,23 @@ spec:
public: "yes"
GEN3_DATE_LABEL
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - spot
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - SPOT
containers:
- name: dicom-viewer
GEN3_DICOM-VIEWER_IMAGE
diff --git a/kube/services/dicom-viewer/dicom-viewer-service.yaml b/kube/services/dicom-viewer/dicom-viewer-service.yaml
index ea2576584..26f3a21b0 100644
--- a/kube/services/dicom-viewer/dicom-viewer-service.yaml
+++ b/kube/services/dicom-viewer/dicom-viewer-service.yaml
@@ -12,4 +12,4 @@ spec:
nodePort: null
name: http
type: ClusterIP
-
\ No newline at end of file
+
diff --git a/kube/services/fence/fence-canary-deploy.yaml b/kube/services/fence/fence-canary-deploy.yaml
index 12e5a8ee8..513a1a998 100644
--- a/kube/services/fence/fence-canary-deploy.yaml
+++ b/kube/services/fence/fence-canary-deploy.yaml
@@ -29,7 +29,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -38,6 +38,22 @@ spec:
values:
- fence
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
automountServiceAccountToken: false
volumes:
- name: yaml-merge
diff --git a/kube/services/fence/fence-deploy.yaml b/kube/services/fence/fence-deploy.yaml
index 95d2b5496..1722676e0 100644
--- a/kube/services/fence/fence-deploy.yaml
+++ b/kube/services/fence/fence-deploy.yaml
@@ -35,7 +35,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -44,6 +44,22 @@ spec:
values:
- fence
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
automountServiceAccountToken: false
volumes:
# -----------------------------------------------------------------------------
diff --git a/kube/services/fenceshib/fenceshib-canary-deploy.yaml b/kube/services/fenceshib/fenceshib-canary-deploy.yaml
index 152edefec..74085009f 100644
--- a/kube/services/fenceshib/fenceshib-canary-deploy.yaml
+++ b/kube/services/fenceshib/fenceshib-canary-deploy.yaml
@@ -30,7 +30,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -39,6 +39,15 @@ spec:
values:
- fence
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
automountServiceAccountToken: false
volumes:
- name: yaml-merge
diff --git a/kube/services/fenceshib/fenceshib-configmap.yaml b/kube/services/fenceshib/fenceshib-configmap.yaml
index 2412518c0..b8e55243d 100644
--- a/kube/services/fenceshib/fenceshib-configmap.yaml
+++ b/kube/services/fenceshib/fenceshib-configmap.yaml
@@ -231,48 +231,48 @@ data:
few exceptions for newer attributes where the name is the same for both versions. You will
usually want to uncomment or map the names for both SAML versions as a unit.
-->
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
@@ -286,7 +286,7 @@ data:
-
+
@@ -416,47 +416,51 @@ data:
- MIIGeDCCBWCgAwIBAgITKwAE3xjJ0BmsXYl8hwAAAATfGDANBgkqhkiG9w0BAQsF
- ADBOMRUwEwYKCZImiZPyLGQBGRYFTE9DQUwxHDAaBgoJkiaJk/IsZAEZFgxESEhT
- U0VDVVJJVFkxFzAVBgNVBAMTDk5JSC1EUEtJLUNBLTFBMB4XDTIxMDMyMzEwMjMz
- MloXDTIzMDMyMzEwMjMzMlowcDELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAk1EMREw
- DwYDVQQHEwhCZXRoZXNkYTEMMAoGA1UEChMDSEhTMQwwCgYDVQQLEwNOSUgxJTAj
- BgNVBAMTHHdhbXNpZ25pbmdmZWRlcmF0aW9uLm5paC5nb3YwggEiMA0GCSqGSIb3
- DQEBAQUAA4IBDwAwggEKAoIBAQDrng8ItLe/PdN7+GT50g0xd4Kc5zVLk5JhHV/M
- C0ICo3ulYpNnK8f0vGYvKXhG9B4gyYjjAVgY8dHL1Yi9Vw4OCMHiAhT80qidFhah
- xdcz8EaKWueqlMV+SZ8/6luahSmYYjKHAxICMg253gHsG6A64pWBsf58fzOYeEV/
- HIItkthIJ7Rh71gXeZwmcir3fAve1sQXrgXsRb265yFQaxLrRI+QA7k+Tiemlt4+
- 7wBOXdROm0kxGJT6u6+IG8g2Qdbc1JWaAmwROGCByREQzfMNUVpXCXJHhKSrHype
- z8Z0o4p2sLXyOysbBAmNoShMhvaaPlsrJt7PyDN5uj6KaXNNAgMBAAGjggMrMIID
- JzAdBgNVHQ4EFgQUb/4wTaSXJ6P1tAmI8mWJhMv1VHowHwYDVR0jBBgwFoAUeWw4
- jBnSyRkHcaYQ+YnwrdCDBZMwggESBgNVHR8EggEJMIIBBTCCAQGggf6ggfuGgcFs
- ZGFwOi8vL0NOPU5JSC1EUEtJLUNBLTFBLENOPU5JSERQS0lDQVNWQyxDTj1DRFAs
- Q049UHVibGljJTIwS2V5JTIwU2VydmljZXMsQ049U2VydmljZXMsQ049Q29uZmln
- dXJhdGlvbixEQz1ESEhTU0VDVVJJVFksREM9TE9DQUw/Y2VydGlmaWNhdGVSZXZv
- Y2F0aW9uTGlzdD9iYXNlP29iamVjdENsYXNzPWNSTERpc3RyaWJ1dGlvblBvaW50
- hjVodHRwOi8vTklIRFBLSUNSTC5OSUguR09WL0NlcnREYXRhL05JSC1EUEtJLUNB
- LTFBLmNybDCCATkGCCsGAQUFBwEBBIIBKzCCAScwgbQGCCsGAQUFBzAChoGnbGRh
- cDovLy9DTj1OSUgtRFBLSS1DQS0xQSxDTj1BSUEsQ049UHVibGljJTIwS2V5JTIw
- U2VydmljZXMsQ049U2VydmljZXMsQ049Q29uZmlndXJhdGlvbixEQz1ESEhTU0VD
- VVJJVFksREM9TE9DQUw/Y0FDZXJ0aWZpY2F0ZT9iYXNlP29iamVjdENsYXNzPWNl
- cnRpZmljYXRpb25BdXRob3JpdHkwQQYIKwYBBQUHMAKGNWh0dHA6Ly9OSUhEUEtJ
- Q1JMLk5JSC5HT1YvQ2VydERhdGEvTklILURQS0ktQ0EtMUEuY3J0MCsGCCsGAQUF
- BzABhh9odHRwOi8vTklIRFBLSU9DU1AuTklILkdPVi9vY3NwMAsGA1UdDwQEAwIF
- oDA9BgkrBgEEAYI3FQcEMDAuBiYrBgEEAYI3FQiHscIohpH8F4b5jwiG7rxzgbud
- JR2F39lChY/gIQIBZQIBJDAdBgNVHSUEFjAUBggrBgEFBQcDAgYIKwYBBQUHAwEw
- JwYJKwYBBAGCNxUKBBowGDAKBggrBgEFBQcDAjAKBggrBgEFBQcDATANBgkqhkiG
- 9w0BAQsFAAOCAQEAkgyJY5Pdyz7hF83hu9BsijKHOdMWe8fDyN7GsDR1O0URBuJW
- oK7FsemmITwMCiDhH+NDkrRWM27EQhuv4w4yIUIFVqPeJS+Ff3gKyqB/VNcrDbfc
- 1RU7Q0qyxwpItm/cEUTTTnfNppf/O6wn/FUbpvPbHMNukqhjtbiYJrmKcO1U0lEu
- i7FlnPW6rRmEbhp/bChVJMkxw8sBH4K3Vrx9c15nPuBgv4E1cFLe1rwrt3wEeRlU
- OaWMTbLwYBaBo2BC3iDHzWioSl4OtzItEkT5XxNOhViuoty09Tu5zd7byqiV7To3
- YVc+Yi/VBubgB+osvPXPAv0AQCLo88dO7MBWQg==
+ MIIGrDCCBZSgAwIBAgITKwAL5UokKuFiZ7VPlQAAAAvlSjANBgkqhkiG9w0B
+ AQsFADBOMRUwEwYKCZImiZPyLGQBGRYFTE9DQUwxHDAaBgoJkiaJk/IsZAEZ
+ FgxESEhTU0VDVVJJVFkxFzAVBgNVBAMTDk5JSC1EUEtJLUNBLTFBMB4XDTIy
+ MTIwNjE2NTUzNloXDTI0MTIwNTE2NTUzNlowgaMxCzAJBgNVBAYTAlVTMREw
+ DwYDVQQIEwhNYXJ5bGFuZDERMA8GA1UEBxMIQmV0aGVzZGExDDAKBgNVBAoT
+ A05JSDEMMAoGA1UECxMDQ0lUMSUwIwYDVQQDExx3YW1zaWduaW5nZmVkZXJh
+ dGlvbi5uaWguZ292MSswKQYJKoZIhvcNAQkBFhxuaWhsb2dpbnN1cHBvcnRA
+ bWFpbC5uaWguZ292MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA
+ o3aHcoq0SAof+GXCl6aZOw9w8CrWTSxz3hxEvG2RaJ4Bm0+UQEcQHArCiQ+Y
+ Wjmx8eORRwOblQKmcozpQAOxNRu7fbJn8msdryKdju+nBJg/gn0Ygn44EJEq
+ pZmBn+FBRgH/lADRdpLM8uO654i1x5Pr8TQtNMevGNot8oiacOZkB1A5N6+l
+ 4guxToA2ZuNhHRhwrpd1wIyq6sgY3J8XpWlx54HjDc8bZvia0bEhJns/qZpM
+ mAh5wvIP1I2JngqJ55mpl/btbIXX+uTn3tIomWre3KKjDKh9ZjUQom8VqTzp
+ oGYHSjTExuopsHnnVpC1HTW0QJoxFa5yR1f2fiUTZwIDAQABo4IDKzCCAycw
+ HQYDVR0OBBYEFMqGnTB0W0rFy8tD2y6JnApAzRCyMB8GA1UdIwQYMBaAFHls
+ OIwZ0skZB3GmEPmJ8K3QgwWTMIIBEgYDVR0fBIIBCTCCAQUwggEBoIH+oIH7
+ hoHBbGRhcDovLy9DTj1OSUgtRFBLSS1DQS0xQSxDTj1OSUhEUEtJQ0FTVkMs
+ Q049Q0RQLENOPVB1YmxpYyUyMEtleSUyMFNlcnZpY2VzLENOPVNlcnZpY2Vz
+ LENOPUNvbmZpZ3VyYXRpb24sREM9REhIU1NFQ1VSSVRZLERDPUxPQ0FMP2Nl
+ cnRpZmljYXRlUmV2b2NhdGlvbkxpc3Q/YmFzZT9vYmplY3RDbGFzcz1jUkxE
+ aXN0cmlidXRpb25Qb2ludIY1aHR0cDovL05JSERQS0lDUkwuTklILkdPVi9D
+ ZXJ0RGF0YS9OSUgtRFBLSS1DQS0xQS5jcmwwggE5BggrBgEFBQcBAQSCASsw
+ ggEnMIG0BggrBgEFBQcwAoaBp2xkYXA6Ly8vQ049TklILURQS0ktQ0EtMUEs
+ Q049QUlBLENOPVB1YmxpYyUyMEtleSUyMFNlcnZpY2VzLENOPVNlcnZpY2Vz
+ LENOPUNvbmZpZ3VyYXRpb24sREM9REhIU1NFQ1VSSVRZLERDPUxPQ0FMP2NB
+ Q2VydGlmaWNhdGU/YmFzZT9vYmplY3RDbGFzcz1jZXJ0aWZpY2F0aW9uQXV0
+ aG9yaXR5MEEGCCsGAQUFBzAChjVodHRwOi8vTklIRFBLSUNSTC5OSUguR09W
+ L0NlcnREYXRhL05JSC1EUEtJLUNBLTFBLmNydDArBggrBgEFBQcwAYYfaHR0
+ cDovL05JSERQS0lPQ1NQLk5JSC5HT1Yvb2NzcDALBgNVHQ8EBAMCBaAwPQYJ
+ KwYBBAGCNxUHBDAwLgYmKwYBBAGCNxUIh7HCKIaR/BeG+Y8Ihu68c4G7nSUd
+ gZOnCYKOiSECAWQCAUwwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMC
+ MCcGCSsGAQQBgjcVCgQaMBgwCgYIKwYBBQUHAwEwCgYIKwYBBQUHAwIwDQYJ
+ KoZIhvcNAQELBQADggEBAGxvrAxX3RUmFXeUa1UewCWfzWCnI3wTMKkqvmI2
+ CySFEOniXNXC/hhu0i000QD9mS527u+lGqgN6eaUaEaSDXMszYR753whJ1Wf
+ xJ50zji2mvUWDyzdRbcvxbVfYe6h6+TzQl0gd8z1DjAxkUWydv9aAFYHNiIY
+ BbhPqvrlOT+oV8CYI8ghEg7qyxo1mso99aVGCbnBA+6IC+jt8lvwQYFISW8J
+ lxJbz5P9fyAbQFuMvcvSkx1WWCCK+d3WsLzU2JETjmYNoID5skFaIfrq+rV1
+ nBqQfCSKApojRaUMwn83IRcosSu0Y3dhpmxz2oDkOURbwOkuPJRgYnZRLBDn
+ e50=
-
+
urn:oasis:names:tc:SAML:2.0:nameid-format:persistent
-
+
diff --git a/kube/services/fenceshib/fenceshib-deploy.yaml b/kube/services/fenceshib/fenceshib-deploy.yaml
index 528726262..ed5d67535 100644
--- a/kube/services/fenceshib/fenceshib-deploy.yaml
+++ b/kube/services/fenceshib/fenceshib-deploy.yaml
@@ -30,7 +30,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -39,6 +39,22 @@ spec:
values:
- fenceshib
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
automountServiceAccountToken: false
volumes:
- name: yaml-merge
@@ -210,11 +226,10 @@ spec:
subPath: "incommon-login.bionimbus.org.crt"
resources:
requests:
- cpu: 0.8
- memory: 2400Mi
+ cpu: 100m
+ memory: 500Mi
limits:
- cpu: 2.0
- memory: 6400Mi
+ memory: 2400Mi
command: ["/bin/bash"]
args:
- "-c"
diff --git a/kube/services/fluentd/fluentd-eks-1.24.yaml b/kube/services/fluentd/fluentd-eks-1.24.yaml
new file mode 100644
index 000000000..1fb748840
--- /dev/null
+++ b/kube/services/fluentd/fluentd-eks-1.24.yaml
@@ -0,0 +1,86 @@
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: fluentd
+ namespace: logging
+ labels:
+ k8s-app: fluentd-eks-1.24-logging
+ version: v1
+ GEN3_DATE_LABEL
+ kubernetes.io/cluster-service: "true"
+spec:
+ selector:
+ matchLabels:
+ k8s-app: fluentd-eks-1.24-logging
+ version: v1
+ template:
+ metadata:
+ labels:
+ k8s-app: fluentd-eks-1.24-logging
+ version: v1
+ kubernetes.io/cluster-service: "true"
+ spec:
+ priorityClassName: system-cluster-critical
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - key: "role"
+ operator: "Equal"
+ value: "jupyter"
+ effect: "NoSchedule"
+ - key: "role"
+ operator: "Equal"
+ value: "workflow"
+ effect: "NoSchedule"
+ containers:
+ - name: fluentd
+ # Hardcode fluentd version to ensure we don't run into containerd logging issues
+ image: fluent/fluentd-kubernetes-daemonset:v1.15.3-debian-cloudwatch-1.0
+ env:
+ # See https://github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter#environment-variables-for-kubernetes
+ - name: K8S_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ # Deploy with kube-setup-fluentd.sh ...
+ - name: LOG_GROUP_NAME
+ GEN3_LOG_GROUP_NAME
+ - name: AWS_REGION
+ value: "us-east-1"
+ - name: FLUENTD_CONF
+ value: "gen3.conf"
+ - name: FLUENT_CONTAINER_TAIL_PARSER_TYPE
+ value: "cri"
+ resources:
+ limits:
+ memory: 1Gi
+ requests:
+ cpu: 100m
+ memory: 1Gi
+ volumeMounts:
+ - name: fluentd-gen3
+ mountPath: /fluentd/etc/gen3.conf
+ subPath: gen3.conf
+ - name: varlog
+ mountPath: /var/log
+ - name: varlibdockercontainers
+ mountPath: /var/lib/docker/containers
+ readOnly: true
+ command: ["/bin/bash" ]
+ args:
+ - "-c"
+ # Run the default fluentd entrypoint
+ - |
+ /fluentd/entrypoint.sh
+ terminationGracePeriodSeconds: 30
+ serviceAccountName: fluentd
+ volumes:
+ - name: varlog
+ hostPath:
+ path: /var/log
+ - name: varlibdockercontainers
+ hostPath:
+ path: /var/lib/docker/containers
+ - name: fluentd-gen3
+ configMap:
+ name: fluentd-gen3
diff --git a/kube/services/fluentd/fluentd-karpenter.yaml b/kube/services/fluentd/fluentd-karpenter.yaml
new file mode 100644
index 000000000..807ef1198
--- /dev/null
+++ b/kube/services/fluentd/fluentd-karpenter.yaml
@@ -0,0 +1,95 @@
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: fluentd-karpenter
+ namespace: logging
+ labels:
+ k8s-app: fluentd-karpenter-logging
+ version: v1
+ GEN3_DATE_LABEL
+ kubernetes.io/cluster-service: "true"
+spec:
+ selector:
+ matchLabels:
+ k8s-app: fluentd-karpenter-logging
+ version: v1
+ template:
+ metadata:
+ labels:
+ k8s-app: fluentd-karpenter-logging
+ version: v1
+ kubernetes.io/cluster-service: "true"
+ spec:
+ priorityClassName: system-cluster-critical
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: karpenter.sh/initialized
+ operator: In
+ values:
+ - "true"
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - key: "role"
+ operator: "Equal"
+ value: "jupyter"
+ effect: "NoSchedule"
+ - key: "role"
+ operator: "Equal"
+ value: "workflow"
+ effect: "NoSchedule"
+ containers:
+ - name: fluentd
+ # Hardcode fluentd version to ensure we don't run into containerd logging issues
+ image: fluent/fluentd-kubernetes-daemonset:v1.15.3-debian-cloudwatch-1.0
+ env:
+ # See https://github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter#environment-variables-for-kubernetes
+ - name: K8S_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ # Deploy with kube-setup-fluentd.sh ...
+ - name: LOG_GROUP_NAME
+ GEN3_LOG_GROUP_NAME
+ - name: AWS_REGION
+ value: "us-east-1"
+ - name: FLUENTD_CONF
+ value: "gen3.conf"
+ - name: FLUENT_CONTAINER_TAIL_PARSER_TYPE
+ value: "cri"
+ resources:
+ limits:
+ memory: 1Gi
+ requests:
+ cpu: 100m
+ memory: 1Gi
+ volumeMounts:
+ - name: fluentd-gen3
+ mountPath: /fluentd/etc/gen3.conf
+ subPath: gen3.conf
+ - name: varlog
+ mountPath: /var/log
+ - name: varlibdockercontainers
+ mountPath: /var/lib/docker/containers
+ readOnly: true
+ command: ["/bin/bash" ]
+ args:
+ - "-c"
+ # Run the default fluentd entrypoint
+ - |
+ /fluentd/entrypoint.sh
+ terminationGracePeriodSeconds: 30
+ serviceAccountName: fluentd
+ volumes:
+ - name: varlog
+ hostPath:
+ path: /var/log
+ - name: varlibdockercontainers
+ hostPath:
+ path: /var/lib/docker/containers
+ - name: fluentd-gen3
+ configMap:
+ name: fluentd-gen3
diff --git a/kube/services/fluentd/fluentd.yaml b/kube/services/fluentd/fluentd.yaml
index f6526ea56..112a0cab2 100644
--- a/kube/services/fluentd/fluentd.yaml
+++ b/kube/services/fluentd/fluentd.yaml
@@ -20,6 +20,16 @@ spec:
version: v1
kubernetes.io/cluster-service: "true"
spec:
+ priorityClassName: system-cluster-critical
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: karpenter.sh/initialized
+ operator: NotIn
+ values:
+ - "true"
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
@@ -27,9 +37,14 @@ spec:
operator: "Equal"
value: "jupyter"
effect: "NoSchedule"
+ - key: "role"
+ operator: "Equal"
+ value: "workflow"
+ effect: "NoSchedule"
containers:
- name: fluentd
- GEN3_FLUENTD_IMAGE
+ # Hardcode fluentd version to match karpenter daemonset
+ image: fluent/fluentd-kubernetes-daemonset:v1.15.3-debian-cloudwatch-1.0
env:
# See https://github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter#environment-variables-for-kubernetes
- name: K8S_NODE_NAME
diff --git a/kube/services/fluentd/gen3-1.15.3.conf b/kube/services/fluentd/gen3-1.15.3.conf
new file mode 100644
index 000000000..d9b6bed5d
--- /dev/null
+++ b/kube/services/fluentd/gen3-1.15.3.conf
@@ -0,0 +1,231 @@
+#
+# Gen3 customization of fluent config.
+# - tries to extract structure from gen3 service logs
+# - includes the default conf at the bottom - just adds prefix rules
+#
+# Deploy by:
+# - mount this file into the container at /fluentd/etc/gen3.conf
+# - set environment variable FLUENTD_CONF=gen3.conf
+#
+# https://www.fluentd.org/guides/recipes/docker-logging
+# https://docs.fluentd.org/v0.12/articles/config-file#introduction:-the-life-of-a-fluentd-event
+# https://docs.fluentd.org/v1.0/articles/out_rewrite_tag_filter
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ @type kubernetes_metadata
+ @id filter_kube_metadata
+ kubernetes_url "#{ENV['FLUENT_FILTER_KUBERNETES_URL'] || 'https://' + ENV.fetch('KUBERNETES_SERVICE_HOST') + ':' + ENV.fetch('KUBERNETES_SERVICE_PORT') + '/api'}"
+ verify_ssl "#{ENV['KUBERNETES_VERIFY_SSL'] || true}"
+ ca_file "#{ENV['KUBERNETES_CA_FILE']}"
+ skip_labels "#{ENV['FLUENT_KUBERNETES_METADATA_SKIP_LABELS'] || 'false'}"
+ skip_container_metadata "#{ENV['FLUENT_KUBERNETES_METADATA_SKIP_CONTAINER_METADATA'] || 'false'}"
+ skip_master_url "#{ENV['FLUENT_KUBERNETES_METADATA_SKIP_MASTER_URL'] || 'false'}"
+ skip_namespace_metadata "#{ENV['FLUENT_KUBERNETES_METADATA_SKIP_NAMESPACE_METADATA'] || 'false'}"
+
+
+
+ @type null
+
+
+
+ @type null
+
+
+
+ @type rewrite_tag_filter
+
+ key $._HOSTNAME
+ pattern ^(.+)$
+ tag $1.docker
+
+
+
+
+ @type rewrite_tag_filter
+
+ key $._HOSTNAME
+ pattern ^(.+)$
+ tag $1.kubelet
+
+
+
+
+ @type rewrite_tag_filter
+
+ key $.host
+ pattern ^(.+)$
+ tag $1.messages
+
+
+
+
+ @type rewrite_tag_filter
+
+ key $.host
+ pattern ^(.+)$
+ tag $1.secure
+
+
+
+
+ @type rewrite_tag_filter
+
+ # json structured log - consider adopting a standard json schema:
+ # https://github.com/timberio/log-event-json-schema
+ key message
+ pattern /^\{\s*"gen3log":/
+ tag kubernetes.gen3.json.${tag}
+
+
+ # combined log format - default Apache and nginx structure
+ # https://httpd.apache.org/docs/1.3/logs.html#combined
+ key message
+ pattern /^(((\d+\.\d+\.\d+\.\d+)|-)\s+){2}\S+\s+\[\d\d?\//
+ tag kubernetes.gen3.combined.${tag}
+
+
+ # unstructured log line
+ key message
+ pattern /\S/
+ tag kubernetes.gen3.raw.${tag}
+
+
+
+
+
+ @type record_transformer
+
+ log_type json
+ # This one doesn't work, for reasons unknown: ${record["kubernetes"]} adds the whole blob, but subobjects can't be accessed
+ #container_name ${record["kubernetes"]["container_name"]}
+
+
+
+
+ @type record_transformer
+
+ log_type combined
+
+
+
+
+ @type record_transformer
+
+ log_type raw
+
+
+
+
+ @type rewrite_tag_filter
+
+ key $.kubernetes.pod_name
+ pattern ^(.+)$
+ tag "#{Time.now.strftime('%Y-%m-%d')}.$1"
+
+#
+# key $.kubernetes
+# pattern ^(.+)$
+# tag $1.container_name
+#
+
+
+#
+# @type rewrite_tag_filter
+#
+# key $.kubernetes.container_name
+# pattern ^(.+)$
+ #tag $1.${tag}
+# tag ${tag}.$1
+#
+#
+
+# TODO:
+# * python stack traces: "Traceback (most recent call last):""
+# https://docs.fluentd.org/v0.12/articles/parser_multiline#formatn
+#
+# Idea: add `visitor` cookie to revproxy ...
+
+
+
+ @type cloudwatch_logs
+ @id out_cloudwatch_logs
+ log_group_name "#{ENV['LOG_GROUP_NAME']}"
+ auto_create_stream true
+ use_tag_as_stream true
+ retention_in_days "#{ENV['RETENTION_IN_DAYS'] || 'nil'}"
+ json_handler yajl # To avoid UndefinedConversionError
+ log_rejected_request "#{ENV['LOG_REJECTED_REQUEST']}" # Log rejected request for missing parts
+
+
+
+#@include fluent.conf
+#@include conf.d/*.conf
diff --git a/kube/services/frontend-framework/frontend-framework-deploy.yaml b/kube/services/frontend-framework/frontend-framework-deploy.yaml
index 843002844..f0da277dc 100644
--- a/kube/services/frontend-framework/frontend-framework-deploy.yaml
+++ b/kube/services/frontend-framework/frontend-framework-deploy.yaml
@@ -22,7 +22,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -31,6 +31,22 @@ spec:
values:
- frontend-framework
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
automountServiceAccountToken: false
volumes:
- name: ca-volume
@@ -68,10 +84,9 @@ spec:
failureThreshold: 6
resources:
requests:
- cpu: 0.6
+ cpu: 100m
memory: 512Mi
limits:
- cpu: 2
memory: 4096Mi
ports:
- containerPort: 3000
diff --git a/kube/services/frontend-framework/frontend-framework-root-deploy.yaml b/kube/services/frontend-framework/frontend-framework-root-deploy.yaml
index df66b97ad..8cad981c8 100644
--- a/kube/services/frontend-framework/frontend-framework-root-deploy.yaml
+++ b/kube/services/frontend-framework/frontend-framework-root-deploy.yaml
@@ -22,7 +22,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -31,6 +31,22 @@ spec:
values:
- frontend-framework
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
automountServiceAccountToken: false
volumes:
- name: ca-volume
diff --git a/kube/services/gdcapi/gdcapi-deploy.yaml b/kube/services/gdcapi/gdcapi-deploy.yaml
index cd397cab8..5967663f0 100644
--- a/kube/services/gdcapi/gdcapi-deploy.yaml
+++ b/kube/services/gdcapi/gdcapi-deploy.yaml
@@ -14,6 +14,23 @@ spec:
labels:
app: gdcapi
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - spot
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - SPOT
automountServiceAccountToken: false
volumes:
- name: config-volume
diff --git a/kube/services/gen3-discovery-ai/README.md b/kube/services/gen3-discovery-ai/README.md
new file mode 100644
index 000000000..4c20678e0
--- /dev/null
+++ b/kube/services/gen3-discovery-ai/README.md
@@ -0,0 +1,42 @@
+# Gen3 Discovery AI Configuration
+
+Expects data in a `gen3-discovery-ai` folder relative to
+where the `manifest.json` is.
+
+Basic setup:
+
+`{{dir where manifest.json is}}/gen3-discovery-ai/knowledge/`
+
+- `tsvs` folder
+ - tsvs with topic_name at beginning of file
+- `markdown` folder
+ - {{topic_name_1}}
+ - markdown file(s)
+ - {{topic_name_2}}
+ - markdown file(s)
+
+The `kube-setup-gen3-discovery-ai` script syncs the above `/knowledge` folder to
+an S3 bucket. The service configuration then pulls from the S3 bucket and runs load commands
+to get the data into Chromadb.
+
+> Note: See the `gen3-discovery-ai` service repo docs and README for more details on data load capabilities.
+
+Check the `gen3-discovery-ai-deploy.yaml` for what commands are being run in the automation.
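+
+A minimal sketch of the sync step described above (the local path follows the layout
+at the top of this README, and `<knowledge-bucket>` stands in for the bucket configured
+in the `g3auto` secret's `storage_config.json`):
+
+```bash
+# hypothetical example: push the local knowledge folder up to the configured bucket
+aws s3 sync "{{dir where manifest.json is}}/gen3-discovery-ai/knowledge" "s3://<knowledge-bucket>"
+```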
+
+Expects secrets set up in the `g3auto/gen3-discovery-ai` folder:
+ - `credentials.json`: Google service account key if using a topic with Google Vertex AI
+ - `env`: .env file contents for service configuration (see service repo for a default one)
+
+## Populating Disk for In-Memory Vectordb Chromadb
+
+To set up pre-configured topics, we need to load data into Chromadb
+(an in-memory vector database with an option to persist to disk).
+
+To load topics consistently, we set up an S3 bucket to house the persisted
+data for the vector database.
+
+### Getting data from S3 into memory
+
+We specify a path for Chromadb to use for persisted data, and when it sees
+data there, it loads it in. The deployment automation therefore 1. syncs the bucket
+down with `aws s3 sync` and then 2. calls a script to load the files from there into
+the in-memory vector store, as sketched below.
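+
+As a rough sketch, the two steps above look like this inside the deployment's init
+containers (paths and the bucket lookup mirror `gen3-discovery-ai-deploy.yaml`; this is
+illustrative, not something to run by hand):
+
+```bash
+# 1. pull the persisted knowledge data from S3 into the shared emptyDir volume
+bucketName=$(grep -o "\"bucket\": *\"[^\"]*\"" /gen3discoveryai/storage_config.json | awk -F'"' '{print $4}')
+aws s3 sync "s3://${bucketName}" "/gen3discoveryai/knowledge/tmp"
+
+# 2. load the synced files into the in-memory vector store (Chromadb)
+poetry run python /gen3discoveryai/bin/load_into_knowledge_store.py tsvs /gen3discoveryai/knowledge/tmp/tsvs
+```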
diff --git a/kube/services/gen3-discovery-ai/gen3-discovery-ai-deploy.yaml b/kube/services/gen3-discovery-ai/gen3-discovery-ai-deploy.yaml
new file mode 100644
index 000000000..dcfe03248
--- /dev/null
+++ b/kube/services/gen3-discovery-ai/gen3-discovery-ai-deploy.yaml
@@ -0,0 +1,181 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: gen3-discovery-ai-deployment
+spec:
+ selector:
+ # Only select pods based on the 'app' label
+ matchLabels:
+ app: gen3-discovery-ai
+ release: production
+ strategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxSurge: 1
+ maxUnavailable: 0
+ template:
+ metadata:
+ labels:
+ app: gen3-discovery-ai
+ release: production
+ GEN3_DATE_LABEL
+ spec:
+ serviceAccountName: gen3-discovery-ai-sa
+ volumes:
+ - name: gen3-discovery-ai-g3auto-volume
+ secret:
+ secretName: gen3-discovery-ai-g3auto
+ - name: gen3-discovery-ai-knowledge-library-volume
+ emptyDir: {}
+ initContainers:
+ # chromadb's persisted disk support requires the ability to write. We don't technically need this ability
+ # since we're populating the entirety of the database from configured files (no live updates).
+ #
+ # Solution: utilize emptyDir as a writable space.
+ #
+ # Procedure: in init containers, copy files from s3 to writable
+ # temporary space in emptyDir, use files from writable space
+ # to load into knowledge library, move final knowledge library
+ # files into top-level emptyDir and make available in final container
+ - name: gen3-discovery-ai-aws-init
+ GEN3_AWSHELPER_IMAGE|-image: quay.io/cdis/awshelper:master-|
+ imagePullPolicy: Always
+ ports:
+ - containerPort: 8080
+ env:
+ - name: GEN3_DEBUG
+ GEN3_DEBUG_FLAG|-value: "False"-|
+ volumeMounts:
+ - name: gen3-discovery-ai-g3auto-volume
+ readOnly: true
+ mountPath: /gen3discoveryai/.env
+ subPath: env
+ - name: gen3-discovery-ai-g3auto-volume
+ readOnly: true
+ mountPath: /gen3discoveryai/credentials.json
+ subPath: credentials.json
+ - name: gen3-discovery-ai-g3auto-volume
+ readOnly: true
+ mountPath: /gen3discoveryai/storage_config.json
+ subPath: storage_config.json
+ - name: gen3-discovery-ai-knowledge-library-volume
+ mountPath: /gen3discoveryai/knowledge
+ imagePullPolicy: Always
+ resources:
+ requests:
+ cpu: 1
+ limits:
+ cpu: 2
+ memory: 512Mi
+ command: ["/bin/bash"]
+ args:
+ - "-c"
+ - |
+ bucketName=$(grep -o "\"bucket\": *\"[^\"]*\"" /gen3discoveryai/storage_config.json | awk -F'"' '{print $4}')
+ echo BUCKET: "$bucketName"
+ echo
+ echo BEFORE /gen3discoveryai/knowledge
+ ls -Ra /gen3discoveryai/knowledge
+ echo
+ echo syncing from s3
+ aws s3 sync "s3://${bucketName}" "/gen3discoveryai/knowledge/tmp"
+ echo
+ echo AFTER /gen3discoveryai/knowledge
+ ls -Ra /gen3discoveryai/knowledge
+ - name: gen3-discovery-ai-knowledge-init
+ GEN3_GEN3-DISCOVERY-AI_IMAGE
+ imagePullPolicy: Always
+ ports:
+ - containerPort: 8080
+ env:
+ - name: GEN3_DEBUG
+ GEN3_DEBUG_FLAG|-value: "False"-|
+ - name: ANONYMIZED_TELEMETRY
+ value: "False"
+ - name: GOOGLE_APPLICATION_CREDENTIALS
+ value: /gen3discoveryai/credentials.json
+ volumeMounts:
+ - name: gen3-discovery-ai-g3auto-volume
+ readOnly: true
+ mountPath: /gen3discoveryai/.env
+ subPath: env
+ - name: gen3-discovery-ai-g3auto-volume
+ readOnly: true
+ mountPath: /gen3discoveryai/credentials.json
+ subPath: credentials.json
+ - name: gen3-discovery-ai-g3auto-volume
+ readOnly: true
+ mountPath: /gen3discoveryai/storage_config.json
+ subPath: storage_config.json
+ - name: gen3-discovery-ai-knowledge-library-volume
+ mountPath: /gen3discoveryai/knowledge
+ imagePullPolicy: Always
+ resources:
+ requests:
+ cpu: 1
+ limits:
+ cpu: 2
+ memory: 512Mi
+ command: ["/bin/bash"]
+ args:
+ - "-c"
+ - |
+ echo
+ echo BEFORE /gen3discoveryai/knowledge
+ ls -Ra /gen3discoveryai/knowledge
+ echo running load_into_knowledge_store.py
+ poetry run python /gen3discoveryai/bin/load_into_knowledge_store.py tsvs /gen3discoveryai/knowledge/tmp/tsvs
+
+ if [ -d "/gen3discoveryai/knowledge/tmp/markdown" ]; then
+ for dir in "/gen3discoveryai/knowledge/tmp/markdown"/*; do
+ if [ -d "$dir" ]; then
+ dir_name=$(basename "$dir")
+
+ echo "Processing directory: $dir_name. Full path: $dir"
+ poetry run python /gen3discoveryai/bin/load_into_knowledge_store.py markdown --topic $dir_name $dir
+ fi
+ done
+ else
+ echo "Not syncing markdown, directory not found: /gen3discoveryai/knowledge/tmp/markdown"
+ fi
+
+ rm -r /gen3discoveryai/knowledge/tmp/
+ echo
+ echo AFTER /gen3discoveryai/knowledge
+ ls -Ra /gen3discoveryai/knowledge
+ containers:
+ - name: gen3-discovery-ai
+ GEN3_GEN3-DISCOVERY-AI_IMAGE
+ imagePullPolicy: Always
+ ports:
+ - containerPort: 8080
+ env:
+ - name: GEN3_DEBUG
+ GEN3_DEBUG_FLAG|-value: "False"-|
+ - name: ANONYMIZED_TELEMETRY
+ value: "False"
+ - name: GOOGLE_APPLICATION_CREDENTIALS
+ value: /gen3discoveryai/credentials.json
+ volumeMounts:
+ - name: gen3-discovery-ai-g3auto-volume
+ readOnly: true
+ mountPath: /gen3discoveryai/.env
+ subPath: env
+ - name: gen3-discovery-ai-g3auto-volume
+ readOnly: true
+ mountPath: /gen3discoveryai/credentials.json
+ subPath: credentials.json
+ - name: gen3-discovery-ai-g3auto-volume
+ readOnly: true
+ mountPath: /gen3discoveryai/storage_config.json
+ subPath: storage_config.json
+ - name: gen3-discovery-ai-knowledge-library-volume
+ mountPath: /gen3discoveryai/knowledge
+ imagePullPolicy: Always
+ resources:
+ requests:
+ cpu: 1
+ limits:
+ cpu: 2
+ # NOTE: If the configured data for the knowledge library (vector database) is large, you may need to bump this
+ memory: 512Mi
diff --git a/kube/services/gen3-discovery-ai/gen3-discovery-ai-service.yaml b/kube/services/gen3-discovery-ai/gen3-discovery-ai-service.yaml
new file mode 100644
index 000000000..b4734c3b8
--- /dev/null
+++ b/kube/services/gen3-discovery-ai/gen3-discovery-ai-service.yaml
@@ -0,0 +1,21 @@
+kind: Service
+apiVersion: v1
+metadata:
+ name: gen3-discovery-ai-service
+spec:
+ selector:
+ app: gen3-discovery-ai
+ release: production
+ ports:
+ - protocol: TCP
+ port: 80
+ targetPort: 8089
+ name: http
+ nodePort: null
+ - protocol: TCP
+ port: 443
+ targetPort: 443
+ name: https
+ nodePort: null
+ type: ClusterIP
+
diff --git a/kube/services/google-sa-validation/google-sa-validation-deploy.yaml b/kube/services/google-sa-validation/google-sa-validation-deploy.yaml
index 880ce5fb3..b35fda845 100644
--- a/kube/services/google-sa-validation/google-sa-validation-deploy.yaml
+++ b/kube/services/google-sa-validation/google-sa-validation-deploy.yaml
@@ -20,6 +20,23 @@ spec:
dbfence: "yes"
GEN3_DATE_LABEL
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
automountServiceAccountToken: false
volumes:
- name: yaml-merge
diff --git a/kube/services/guppy/guppy-deploy.yaml b/kube/services/guppy/guppy-deploy.yaml
index 16486d3a9..c3e8d121c 100644
--- a/kube/services/guppy/guppy-deploy.yaml
+++ b/kube/services/guppy/guppy-deploy.yaml
@@ -27,7 +27,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -36,6 +36,22 @@ spec:
values:
- guppy
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - spot
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - SPOT
automountServiceAccountToken: false
volumes:
- name: guppy-config
@@ -138,8 +154,7 @@ spec:
imagePullPolicy: Always
resources:
requests:
- cpu: 0.5
- memory: 1024Mi
+ cpu: 100m
+ memory: 256Mi
limits:
- cpu: 1
- memory: 2400Mi
+ memory: 2000Mi
diff --git a/kube/services/hatchery/hatchery-deploy.yaml b/kube/services/hatchery/hatchery-deploy.yaml
index f67100098..f7de81d79 100644
--- a/kube/services/hatchery/hatchery-deploy.yaml
+++ b/kube/services/hatchery/hatchery-deploy.yaml
@@ -28,7 +28,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -37,6 +37,22 @@ spec:
values:
- hatchery
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: hatchery-service-account
securityContext:
fsGroup: 1001
diff --git a/kube/services/indexd/indexd-canary-deploy.yaml b/kube/services/indexd/indexd-canary-deploy.yaml
index 92c329f26..7e17ba9af 100644
--- a/kube/services/indexd/indexd-canary-deploy.yaml
+++ b/kube/services/indexd/indexd-canary-deploy.yaml
@@ -27,7 +27,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -36,6 +36,22 @@ spec:
values:
- indexd
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - spot
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - SPOT
automountServiceAccountToken: false
volumes:
- name: config-volume
diff --git a/kube/services/indexd/indexd-deploy.yaml b/kube/services/indexd/indexd-deploy.yaml
index c9961fba7..239079058 100644
--- a/kube/services/indexd/indexd-deploy.yaml
+++ b/kube/services/indexd/indexd-deploy.yaml
@@ -31,7 +31,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -40,6 +40,22 @@ spec:
values:
- indexd
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - spot
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - SPOT
automountServiceAccountToken: false
volumes:
- name: config-volume
@@ -152,8 +168,7 @@ spec:
subPath: "ca.pem"
resources:
requests:
- cpu: 0.5
- memory: 1024Mi
+ cpu: 100m
+ memory: 512Mi
limits:
- cpu: 1.0
- memory: 2048Mi
+ memory: 1024Mi
diff --git a/kube/services/influxdb/influxdb-deployment.yaml b/kube/services/influxdb/influxdb-deployment.yaml
index 72d4b57d7..3279e3c55 100644
--- a/kube/services/influxdb/influxdb-deployment.yaml
+++ b/kube/services/influxdb/influxdb-deployment.yaml
@@ -15,6 +15,23 @@ spec:
labels:
app: influxdb
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
containers:
- image: docker.io/influxdb:1.8.0
imagePullPolicy: IfNotPresent
diff --git a/kube/services/ingress/ingress.yaml b/kube/services/ingress/ingress.yaml
index 6c9de7f56..1db08e8ef 100644
--- a/kube/services/ingress/ingress.yaml
+++ b/kube/services/ingress/ingress.yaml
@@ -9,7 +9,9 @@ metadata:
alb.ingress.kubernetes.io/certificate-arn: $ARN
alb.ingress.kubernetes.io/group.name: "$vpc_name"
alb.ingress.kubernetes.io/listen-ports: '[{"HTTP": 80}, {"HTTPS":443}]'
+ alb.ingress.kubernetes.io/load-balancer-attributes: idle_timeout.timeout_seconds=600
alb.ingress.kubernetes.io/actions.ssl-redirect: '{"Type": "redirect", "RedirectConfig": { "Protocol": "HTTPS", "Port": "443", "StatusCode": "HTTP_301"}}'
+ alb.ingress.kubernetes.io/ssl-policy: ELBSecurityPolicy-TLS13-1-2-FIPS-2023-04
spec:
ingressClassName: alb
rules:
@@ -22,4 +24,4 @@ spec:
service:
name: revproxy-service
port:
- number: 80
\ No newline at end of file
+ number: 80
diff --git a/kube/services/jenkins-ci-worker/jenkins-ci-worker-deploy.yaml b/kube/services/jenkins-ci-worker/jenkins-ci-worker-deploy.yaml
index f7b874111..466e4a7df 100644
--- a/kube/services/jenkins-ci-worker/jenkins-ci-worker-deploy.yaml
+++ b/kube/services/jenkins-ci-worker/jenkins-ci-worker-deploy.yaml
@@ -16,6 +16,20 @@ spec:
annotations:
"cluster-autoscaler.kubernetes.io/safe-to-evict": "false"
spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
+ - matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
serviceAccountName: jenkins-service
securityContext:
runAsUser: 1000
@@ -105,6 +119,7 @@ spec:
limits:
cpu: 0.9
memory: 4096Mi
+ ephemeral-storage: 500Mi
imagePullPolicy: Always
volumeMounts:
- name: "cert-volume"
diff --git a/kube/services/jenkins-worker/jenkins-worker-deploy.yaml b/kube/services/jenkins-worker/jenkins-worker-deploy.yaml
index 4e13eea69..aea836a4f 100644
--- a/kube/services/jenkins-worker/jenkins-worker-deploy.yaml
+++ b/kube/services/jenkins-worker/jenkins-worker-deploy.yaml
@@ -16,6 +16,20 @@ spec:
annotations:
"cluster-autoscaler.kubernetes.io/safe-to-evict": "false"
spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
+ - matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
serviceAccountName: jenkins-service
securityContext:
runAsUser: 1000
@@ -24,7 +38,8 @@ spec:
- args:
- -c
- |
- # fix permissions for /var/run/docker.sock
+ # fix permissions for docker and containerd
+ chmod 666 /var/run/containerd/containerd.sock
chmod 666 /var/run/docker.sock
echo "done"
command:
@@ -39,6 +54,8 @@ spec:
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
+ - mountPath: /var/run/containerd/containerd.sock
+ name: containerdsock
- mountPath: /var/run/docker.sock
name: dockersock
containers:
@@ -107,7 +124,7 @@ spec:
fieldPath: status.hostIP
resources:
limits:
- cpu: 0.6
+ cpu: "0.6"
memory: 2048Mi
imagePullPolicy: Always
volumeMounts:
@@ -125,7 +142,8 @@ spec:
subPath: "ca.pem"
- name: dockersock
mountPath: "/var/run/docker.sock"
- imagePullPolicy: Always
+ - name: containerdsock
+ mountPath: "/var/run/containerd/containerd.sock"
volumes:
- name: cert-volume
secret:
@@ -136,3 +154,6 @@ spec:
- name: dockersock
hostPath:
path: /var/run/docker.sock
+ - name: containerdsock
+ hostPath:
+ path: "/var/run/containerd/containerd.sock"
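
The init container now loosens permissions on both the Docker and containerd sockets, and the worker mounts both from the host. A quick spot check from a running worker pod, assuming the pod name is filled in and that the main container is named jenkins-worker (an assumption; the container name is not visible in this hunk):

    # Pod name is a placeholder; -c targets the assumed main container name.
    kubectl exec <jenkins-worker-pod> -c jenkins-worker -- \
      ls -l /var/run/docker.sock /var/run/containerd/containerd.sock
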
diff --git a/kube/services/jenkins/jenkins-deploy.yaml b/kube/services/jenkins/jenkins-deploy.yaml
index 2c6afb76d..954e996f2 100644
--- a/kube/services/jenkins/jenkins-deploy.yaml
+++ b/kube/services/jenkins/jenkins-deploy.yaml
@@ -24,6 +24,24 @@ spec:
annotations:
"cluster-autoscaler.kubernetes.io/safe-to-evict": "false"
spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
+ - matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - key: topology.kubernetes.io/zone
+ operator: In
+ values:
+ - us-east-1a
serviceAccountName: jenkins-service
securityContext:
runAsUser: 1000
@@ -97,8 +115,8 @@ spec:
port: 8080
resources:
limits:
- cpu: 0.9
- memory: 8192Mi
+ cpu: 2
+ memory: 6Gi
imagePullPolicy: Always
volumeMounts:
- name: datadir
diff --git a/kube/services/jenkins/rolebinding-devops.yaml b/kube/services/jenkins/rolebinding-devops.yaml
index 2f262172e..dd99bdd86 100644
--- a/kube/services/jenkins/rolebinding-devops.yaml
+++ b/kube/services/jenkins/rolebinding-devops.yaml
@@ -15,12 +15,37 @@ roleRef:
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
- name: argo-binding
+ name: argo-role-binding-CURRENT_NAMESPACE
namespace: argo
subjects:
- kind: ServiceAccount
name: gitops-sa
+ namespace: CURRENT_NAMESPACE
+ apiGroup: ""
roleRef:
kind: ClusterRole
name: admin
- apiGroup: ""
\ No newline at end of file
+ apiGroup: ""
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: gitops-cluster-role-CURRENT_NAMESPACE
+rules:
+- apiGroups: [""]
+ resources: ["namespaces","services"]
+ verbs: ["get", "list"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: gitops-cluster-binding-CURRENT_NAMESPACE
+subjects:
+- kind: ServiceAccount
+ name: gitops-sa
+ namespace: CURRENT_NAMESPACE
+ apiGroup: ""
+roleRef:
+ kind: ClusterRole
+ name: gitops-cluster-role-CURRENT_NAMESPACE
+ apiGroup: rbac.authorization.k8s.io
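
The new ClusterRole and bindings carry CURRENT_NAMESPACE placeholders. A hypothetical substitution step, assuming the placeholder is not already rendered by the gen3 deployment tooling, swaps in the active namespace before applying:

    # The NAMESPACE lookup and the sed substitution are illustrative only; adjust
    # to however the placeholder is actually rendered in your deployment flow.
    NAMESPACE="$(kubectl config view --minify -o jsonpath='{..namespace}')"
    sed "s/CURRENT_NAMESPACE/${NAMESPACE}/g" kube/services/jenkins/rolebinding-devops.yaml \
      | kubectl apply -f -
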
diff --git a/kube/services/jenkins2-ci-worker/jenkins2-agent-service.yaml b/kube/services/jenkins2-ci-worker/jenkins2-agent-service.yaml
new file mode 100644
index 000000000..7f4e58109
--- /dev/null
+++ b/kube/services/jenkins2-ci-worker/jenkins2-agent-service.yaml
@@ -0,0 +1,17 @@
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ name: jenkins-agent-service
+ name: jenkins-agent
+ namespace: default
+spec:
+ ports:
+ - name: slavelistener
+ port: 50000
+ protocol: TCP
+ targetPort: 50000
+ selector:
+ app: jenkins
+ sessionAffinity: None
+ type: ClusterIP
diff --git a/kube/services/jenkins2-ci-worker/jenkins2-ci-worker-deploy.yaml b/kube/services/jenkins2-ci-worker/jenkins2-ci-worker-deploy.yaml
new file mode 100644
index 000000000..3dea38a5c
--- /dev/null
+++ b/kube/services/jenkins2-ci-worker/jenkins2-ci-worker-deploy.yaml
@@ -0,0 +1,149 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: jenkins-ci-worker-deployment
+spec:
+ selector:
+ # Only select pods based on the 'app' label
+ matchLabels:
+ app: jenkins-ci-worker
+ template:
+ metadata:
+ labels:
+ app: jenkins-ci-worker
+ # for network policy
+ netnolimit: "yes"
+ annotations:
+ "cluster-autoscaler.kubernetes.io/safe-to-evict": "false"
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
+ - matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ serviceAccountName: jenkins-service
+ securityContext:
+ runAsUser: 1000
+ fsGroup: 1000
+ initContainers:
+ - args:
+ - -c
+ - |
+ # fix permissions for /var/run/docker.sock
+ chmod 666 /var/run/docker.sock
+ echo "done"
+ command:
+ - /bin/bash
+ image: quay.io/cdis/awshelper:master
+ imagePullPolicy: Always
+ name: awshelper
+ resources: {}
+ securityContext:
+ allowPrivilegeEscalation: false
+ runAsUser: 0
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ volumeMounts:
+ - mountPath: /var/run/docker.sock
+ name: dockersock
+ containers:
+ #
+      # See the following for details on running docker in a pod:
+ # https://estl.tech/accessing-docker-from-a-kubernetes-pod-68996709c04b
+ #
+ - name: jenkins-worker
+ image: "quay.io/cdis/gen3-ci-worker:master"
+ ports:
+ - containerPort: 8080
+ env:
+ - name: JENKINS_URL
+ value: "https://jenkins2.planx-pla.net"
+ - name: JENKINS_SECRET
+ valueFrom:
+ secretKeyRef:
+ name: jenkins-ci-worker-g3auto
+ key: jenkins-jnlp-agent-secret
+ - name: JENKINS_AGENT_NAME
+ value: "gen3-ci-worker"
+ - name: JENKINS_TUNNEL
+ value: "jenkins-agent:50000"
+ - name: AWS_DEFAULT_REGION
+ value: us-east-1
+ - name: JAVA_OPTS
+ value: "-Xmx3072m"
+ - name: AWS_ACCESS_KEY_ID
+ valueFrom:
+ secretKeyRef:
+ name: jenkins-secret
+ key: aws_access_key_id
+ - name: AWS_SECRET_ACCESS_KEY
+ valueFrom:
+ secretKeyRef:
+ name: jenkins-secret
+ key: aws_secret_access_key
+ - name: GOOGLE_EMAIL_AUX1
+ valueFrom:
+ secretKeyRef:
+ name: google-acct1
+ key: email
+ - name: GOOGLE_PASSWORD_AUX1
+ valueFrom:
+ secretKeyRef:
+ name: google-acct1
+ key: password
+ - name: GOOGLE_EMAIL_AUX2
+ valueFrom:
+ secretKeyRef:
+ name: google-acct2
+ key: email
+ - name: GOOGLE_PASSWORD_AUX2
+ valueFrom:
+ secretKeyRef:
+ name: google-acct2
+ key: password
+ - name: GOOGLE_APP_CREDS_JSON
+ valueFrom:
+ secretKeyRef:
+ name: jenkins-g3auto
+ key: google_app_creds.json
+ resources:
+ limits:
+ cpu: 0.9
+ memory: 4096Mi
+ ephemeral-storage: 500Mi
+ imagePullPolicy: Always
+ volumeMounts:
+ - name: "cert-volume"
+ readOnly: true
+ mountPath: "/mnt/ssl/service.crt"
+ subPath: "service.crt"
+ - name: "cert-volume"
+ readOnly: true
+ mountPath: "/mnt/ssl/service.key"
+ subPath: "service.key"
+ - name: "ca-volume"
+ readOnly: true
+ mountPath: "/usr/local/share/ca-certificates/cdis/cdis-ca.crt"
+ subPath: "ca.pem"
+ - name: dockersock
+ mountPath: "/var/run/docker.sock"
+ imagePullPolicy: Always
+ volumes:
+ - name: cert-volume
+ secret:
+ secretName: "cert-jenkins-service"
+ - name: ca-volume
+ secret:
+ secretName: "service-ca"
+ - name: dockersock
+ hostPath:
+ path: /var/run/docker.sock
diff --git a/kube/services/jenkins2-ci-worker/jenkins2-ci-worker-pvc.yaml b/kube/services/jenkins2-ci-worker/jenkins2-ci-worker-pvc.yaml
new file mode 100644
index 000000000..047e4e966
--- /dev/null
+++ b/kube/services/jenkins2-ci-worker/jenkins2-ci-worker-pvc.yaml
@@ -0,0 +1,12 @@
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+ name: datadir-jenkins-ci
+ annotations:
+ volume.beta.kubernetes.io/storage-class: gp2
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 200Gi
diff --git a/kube/services/jenkins2-worker/jenkins2-worker-deploy.yaml b/kube/services/jenkins2-worker/jenkins2-worker-deploy.yaml
index ad29eb47e..5646e8bc2 100644
--- a/kube/services/jenkins2-worker/jenkins2-worker-deploy.yaml
+++ b/kube/services/jenkins2-worker/jenkins2-worker-deploy.yaml
@@ -16,6 +16,20 @@ spec:
annotations:
"cluster-autoscaler.kubernetes.io/safe-to-evict": "false"
spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
+ - matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
serviceAccountName: jenkins-service
securityContext:
runAsUser: 1000
diff --git a/kube/services/jenkins2/jenkins2-deploy.yaml b/kube/services/jenkins2/jenkins2-deploy.yaml
index 673686d17..08365f811 100644
--- a/kube/services/jenkins2/jenkins2-deploy.yaml
+++ b/kube/services/jenkins2/jenkins2-deploy.yaml
@@ -24,6 +24,20 @@ spec:
annotations:
"cluster-autoscaler.kubernetes.io/safe-to-evict": "false"
spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
+ - matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
serviceAccountName: jenkins-service
securityContext:
runAsUser: 1000
@@ -34,7 +48,7 @@ spec:
# https://estl.tech/accessing-docker-from-a-kubernetes-pod-68996709c04b
#
- name: jenkins
- GEN3_JENKINS_IMAGE
+ GEN3_JENKINS2_IMAGE
ports:
- containerPort: 8080
name: http
diff --git a/kube/services/jobs/arborist-rm-expired-access-cronjob.yaml b/kube/services/jobs/arborist-rm-expired-access-cronjob.yaml
index 328894689..a72623736 100644
--- a/kube/services/jobs/arborist-rm-expired-access-cronjob.yaml
+++ b/kube/services/jobs/arborist-rm-expired-access-cronjob.yaml
@@ -1,4 +1,4 @@
-apiVersion: batch/v1beta1
+apiVersion: batch/v1
kind: CronJob
metadata:
name: arborist-rm-expired-access
@@ -14,6 +14,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
volumes:
- name: arborist-secret
secret:
diff --git a/kube/services/jobs/arborist-rm-expired-access-job.yaml b/kube/services/jobs/arborist-rm-expired-access-job.yaml
index 34833dded..6985906d0 100644
--- a/kube/services/jobs/arborist-rm-expired-access-job.yaml
+++ b/kube/services/jobs/arborist-rm-expired-access-job.yaml
@@ -8,6 +8,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
automountServiceAccountToken: false
dnsConfig:
options:
diff --git a/kube/services/jobs/arboristdb-create-job.yaml b/kube/services/jobs/arboristdb-create-job.yaml
index 74d7bebe4..7898a0c91 100644
--- a/kube/services/jobs/arboristdb-create-job.yaml
+++ b/kube/services/jobs/arboristdb-create-job.yaml
@@ -9,6 +9,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
automountServiceAccountToken: false
volumes:
- name: arborist-secret
diff --git a/kube/services/jobs/aws-bucket-replicate-job.yaml b/kube/services/jobs/aws-bucket-replicate-job.yaml
index d9f0f08ad..d3893d2bb 100644
--- a/kube/services/jobs/aws-bucket-replicate-job.yaml
+++ b/kube/services/jobs/aws-bucket-replicate-job.yaml
@@ -10,6 +10,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
volumes:
- name: cred-volume
secret:
diff --git a/kube/services/jobs/bucket-manifest-job.yaml b/kube/services/jobs/bucket-manifest-job.yaml
index 98506331e..9cfbe054b 100644
--- a/kube/services/jobs/bucket-manifest-job.yaml
+++ b/kube/services/jobs/bucket-manifest-job.yaml
@@ -9,6 +9,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: sa-#SA_NAME_PLACEHOLDER#
volumes:
- name: cred-volume
diff --git a/kube/services/jobs/bucket-replicate-job.yaml b/kube/services/jobs/bucket-replicate-job.yaml
index fbaf15816..0f7ae9260 100644
--- a/kube/services/jobs/bucket-replicate-job.yaml
+++ b/kube/services/jobs/bucket-replicate-job.yaml
@@ -17,6 +17,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: batch-operations-account
securityContext:
fsGroup: 1000
diff --git a/kube/services/jobs/bucket-replication-job.yaml b/kube/services/jobs/bucket-replication-job.yaml
index 4ef56367e..c8e541d9e 100644
--- a/kube/services/jobs/bucket-replication-job.yaml
+++ b/kube/services/jobs/bucket-replication-job.yaml
@@ -8,6 +8,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: sa-#SA_NAME_PLACEHOLDER#
volumes:
- name: cred-volume
diff --git a/kube/services/jobs/bucket-size-report-job.yaml b/kube/services/jobs/bucket-size-report-job.yaml
index 253d010e4..89d927f15 100644
--- a/kube/services/jobs/bucket-size-report-job.yaml
+++ b/kube/services/jobs/bucket-size-report-job.yaml
@@ -8,6 +8,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
restartPolicy: Never
securityContext:
fsGroup: 1000
diff --git a/kube/services/jobs/cedar-ingestion-job.yaml b/kube/services/jobs/cedar-ingestion-job.yaml
index 37f537c53..f6be4dd23 100644
--- a/kube/services/jobs/cedar-ingestion-job.yaml
+++ b/kube/services/jobs/cedar-ingestion-job.yaml
@@ -1,61 +1,59 @@
#
# run with:
-# gen3 job run cedar-ingestion \
-# SUBMISSION_USER $submission_user \
-# CEDAR_DIRECTORY_ID $cedar_directory_id \
-#
-# SUBMISSION_USER(optional)
-# e-mail of user-account to submit the data to MDS, must have MDS admin and CEDAR polices granted. Default: "cdis.autotest@gmail.com"
+# gen3 job run cedar-ingestion [CEDAR_DIRECTORY_ID $cedar_directory_id]
#
# CEDAR_DIRECTORY_ID
-# ID of CEDAR directory where instances will be pulled from, only needs its UUID part. For example: "123e4567-e89b-12d3-a456-426614174000"
+# The directory id will be read from 'directory_id.txt' in the
+# 'cedar-g3auto' secret.
+# You can override the secret value with an optional command line argument.
+#
# The deployed CEDAR wrapper services must be able to read from this directory.
#
-# Example
-# gen3 job run cedar-ingestion CEDAR_DIRECTORY_ID 123e4567-e89b-12d3-a456-426614174000 SUBMISSION_USER cdis.autotest@gmail.com
+# ACCESS TOKENS
+# Access tokens will be generated for an existing fence-client, cedar_ingest_client.
+# The client_id and client_secret will be read from
+# 'cedar_client_credentials.json' in the 'cedar-g3auto' secret.
+#
+# The fence-client must have MDS admin and CEDAR policies granted.
#
+
apiVersion: batch/v1
kind: Job
metadata:
name: cedar-ingestion
spec:
+ backoffLimit: 0
template:
metadata:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: useryaml-job
volumes:
- - name: yaml-merge
- configMap:
- name: "fence-yaml-merge"
- name: shared-data
emptyDir: {}
-# -----------------------------------------------------------------------------
-# DEPRECATED! Remove when all commons are no longer using local_settings.py
-# for fence.
-# -----------------------------------------------------------------------------
- - name: old-config-volume
- secret:
- secretName: "fence-secret"
- - name: creds-volume
- secret:
- secretName: "fence-creds"
- - name: config-helper
- configMap:
- name: config-helper
- - name: json-secret-volume
+ - name: cedar-client-volume-g3auto
secret:
- secretName: "fence-json-secret"
-# -----------------------------------------------------------------------------
- - name: config-volume
- secret:
- secretName: "fence-config"
- - name: fence-jwt-keys
- secret:
- secretName: "fence-jwt-keys"
- containers:
- - name: awshelper
+ secretName: cedar-g3auto # the secret name in kube
+ initContainers:
+ - name: cedar
image: quay.io/cdis/awshelper:master
imagePullPolicy: Always
ports:
@@ -66,10 +64,18 @@ spec:
configMapKeyRef:
name: global
key: hostname
- - name: SUBMISSION_USER
- GEN3_SUBMISSION_USER|-value: "cdis.autotest@gmail.com"-|
- name: CEDAR_DIRECTORY_ID
GEN3_CEDAR_DIRECTORY_ID|-value: ""-|
+ - name: CEDAR_DIRECTORY_ID_SECRET
+ valueFrom:
+ secretKeyRef:
+ name: cedar-g3auto
+ key: "directory_id.txt"
+ - name: CEDAR_CLIENT_CREDENTIALS
+ valueFrom:
+ secretKeyRef:
+ name: cedar-g3auto
+ key: "cedar_client_credentials.json"
volumeMounts:
- name: shared-data
mountPath: /mnt/shared
@@ -77,117 +83,75 @@ spec:
limits:
cpu: 1
memory: 5Gi
+
command: ["/bin/bash" ]
args:
- "-c"
- |
if [[ -z "$CEDAR_DIRECTORY_ID" ]]; then
- echo -e "CEDAR_DIRECTORY_ID is required" 1>&2
- exit 1
+ if [[ ! -z "$CEDAR_DIRECTORY_ID_SECRET" ]]; then
+ echo "CEDAR_DIRECTORY_ID is from g3auto secret"
+ export CEDAR_DIRECTORY_ID=$CEDAR_DIRECTORY_ID_SECRET
+ else
+ echo -e "ERROR: CEDAR_DIRECTORY_ID must be in secret or on command line" 1>&2
+ exit 0
+ fi
+ else
+ echo "CEDAR_DIRECTORY_ID is from command line parameter"
+ fi
+
+ if [[ ! -z "$CEDAR_CLIENT_CREDENTIALS" ]]; then
+ export CEDAR_CLIENT_ID=$(echo $CEDAR_CLIENT_CREDENTIALS | jq -r .client_id)
+ export CEDAR_CLIENT_SECRET=$(echo $CEDAR_CLIENT_CREDENTIALS | jq -r .client_secret)
+ else
+ echo -e "Could not read cedar-client credentials" 1>&2
+ exit 0
fi
- let count=0
- while [[ ! -f /mnt/shared/access_token.txt && $count -lt 50 ]]; do
- echo "Waiting for /mnt/shared/access_token.txt";
- sleep 2
- let count=$count+1
- done
+
pip install pydash
export GEN3_HOME="$HOME/cloud-automation"
- export ACCESS_TOKEN="$(cat /mnt/shared/access_token.txt)"
- python ${GEN3_HOME}/files/scripts/healdata/heal-cedar-data-ingest.py --access_token $ACCESS_TOKEN --directory $CEDAR_DIRECTORY_ID --hostname $HOSTNAME
- echo "All done - exit status $?"
- - name: fence
- GEN3_FENCE_IMAGE
- imagePullPolicy: Always
- env:
- - name: PYTHONPATH
- value: /var/www/fence
- - name: SUBMISSION_USER
- GEN3_SUBMISSION_USER|-value: "cdis.autotest@gmail.com"-|
- - name: TOKEN_EXPIRATION
- value: "3600"
- - name: FENCE_PUBLIC_CONFIG
- valueFrom:
- configMapKeyRef:
- name: manifest-fence
- key: fence-config-public.yaml
- optional: true
- volumeMounts:
-# -----------------------------------------------------------------------------
-# DEPRECATED! Remove when all commons are no longer using local_settings.py
-# for fence.
-# -----------------------------------------------------------------------------
- - name: "old-config-volume"
- readOnly: true
- mountPath: "/var/www/fence/local_settings.py"
- subPath: local_settings.py
- - name: "creds-volume"
- readOnly: true
- mountPath: "/var/www/fence/creds.json"
- subPath: creds.json
- - name: "config-helper"
- readOnly: true
- mountPath: "/var/www/fence/config_helper.py"
- subPath: config_helper.py
- - name: "json-secret-volume"
- readOnly: true
- mountPath: "/var/www/fence/fence_credentials.json"
- subPath: fence_credentials.json
-# -----------------------------------------------------------------------------
- - name: "config-volume"
- readOnly: true
- mountPath: "/var/www/fence/fence-config-secret.yaml"
- subPath: fence-config.yaml
- - name: "yaml-merge"
- readOnly: true
- mountPath: "/var/www/fence/yaml_merge.py"
- subPath: yaml_merge.py
- - name: "fence-jwt-keys"
- readOnly: true
- mountPath: "/fence/jwt-keys.tar"
- subPath: "jwt-keys.tar"
- - name: shared-data
- mountPath: /mnt/shared
- command: ["/bin/bash" ]
- args:
+ python ${GEN3_HOME}/files/scripts/healdata/heal-cedar-data-ingest.py --directory $CEDAR_DIRECTORY_ID --cedar_client_id $CEDAR_CLIENT_ID --cedar_client_secret $CEDAR_CLIENT_SECRET --hostname $HOSTNAME
+ status=$?
+ if [[ $status -ne 0 ]]; then
+            echo "WARNING: non-zero exit code: $status"
+ else
+ echo "All done - exit code: $status"
+ touch /mnt/shared/success
+ fi
+ containers:
+ - name: awshelper
+ env:
+ - name: slackWebHook
+ valueFrom:
+ configMapKeyRef:
+ name: global
+ key: slack_webhook
+ - name: gen3Env
+ valueFrom:
+ configMapKeyRef:
+ name: manifest-global
+ key: hostname
+ GEN3_AWSHELPER_IMAGE|-image: quay.io/cdis/awshelper:master-|
+ volumeMounts:
+ - name: shared-data
+ mountPath: /mnt/shared
+ command: ["/bin/bash"]
+ args:
- "-c"
- |
- echo "${FENCE_PUBLIC_CONFIG:-""}" > "/var/www/fence/fence-config-public.yaml"
- python /var/www/fence/yaml_merge.py /var/www/fence/fence-config-public.yaml /var/www/fence/fence-config-secret.yaml > /var/www/fence/fence-config.yaml
- if [ -f /fence/jwt-keys.tar ]; then
- cd /fence
- tar xvf jwt-keys.tar
- if [ -d jwt-keys ]; then
- mkdir -p keys
- mv jwt-keys/* keys/
- fi
+ if [[ ! "$slackWebHook" =~ ^http ]]; then
+ echo "Slack webhook not set"
+ exit 0
fi
- echo "generate access token"
- echo "fence-create --path fence token-create --type access_token --username $SUBMISSION_USER --scopes openid,user,test-client --exp $TOKEN_EXPIRATION"
- tempFile="$(mktemp -p /tmp token.txt_XXXXXX)"
- success=false
- count=0
- sleepTime=10
- # retry loop
- while [[ $count -lt 3 && $success == false ]]; do
- if fence-create --path fence token-create --type access_token --username $SUBMISSION_USER --scopes openid,user,test-client --exp $TOKEN_EXPIRATION > "$tempFile"; then
- echo "fence-create success!"
- tail -1 "$tempFile" > /mnt/shared/access_token.txt
- # base64 --decode complains about invalid characters - don't know why
- awk -F . '{ print $2 }' /mnt/shared/access_token.txt | base64 --decode 2> /dev/null
- success=true
- else
- echo "fence-create failed!"
- cat "$tempFile"
- echo "sleep for $sleepTime, then retry"
- sleep "$sleepTime"
- let sleepTime=$sleepTime+$sleepTime
- fi
- let count=$count+1
- done
- if [[ $success != true ]]; then
- echo "Giving up on fence-create after $count retries - failed to create valid access token"
+ if ! [ -f /mnt/shared/success ]; then
+ success="FAILED"
+ color="ff0000"
+ else
+ success="SUCCESS"
+ color="2EB67D"
fi
- echo ""
- echo "All Done - always succeed to avoid k8s retries"
+ echo "Sending ${success} message to slack..."
+ payload="{\"attachments\": [{\"fallback\": \"JOB ${success}: cedar-ingest cronjob on ${gen3Env}\",\"color\": \"#${color}\",\"title\": \"JOB ${success}: cedar-ingest cronjob on ${gen3Env}\",\"text\": \"Pod name: ${HOSTNAME}\",\"ts\": \"$(date +%s)\"}]}"
+ echo "Payload=${payload}"
+ curl -X POST --data-urlencode "payload=${payload}" "${slackWebHook}"
restartPolicy: Never
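
The rewritten job now pulls both the directory id and the CEDAR client credentials from the cedar-g3auto secret instead of minting a fence access token in a separate fence container. A minimal sketch of providing that secret by hand and launching the job (in practice the secret is normally managed through the g3auto tooling, and the file contents below are placeholders):

    # directory_id.txt holds the CEDAR directory UUID; cedar_client_credentials.json
    # holds {"client_id": "...", "client_secret": "..."} for the cedar_ingest_client.
    kubectl create secret generic cedar-g3auto \
      --from-file=directory_id.txt \
      --from-file=cedar_client_credentials.json
    gen3 job run cedar-ingestion
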
diff --git a/kube/services/jobs/client-modify-job.yaml b/kube/services/jobs/client-modify-job.yaml
index 995fdd483..5726092be 100644
--- a/kube/services/jobs/client-modify-job.yaml
+++ b/kube/services/jobs/client-modify-job.yaml
@@ -11,6 +11,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
volumes:
- name: yaml-merge
configMap:
diff --git a/kube/services/jobs/cogwheel-register-client-job.yaml b/kube/services/jobs/cogwheel-register-client-job.yaml
index 03461619b..1bdbf906d 100644
--- a/kube/services/jobs/cogwheel-register-client-job.yaml
+++ b/kube/services/jobs/cogwheel-register-client-job.yaml
@@ -17,6 +17,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
volumes:
- name: cogwheel-g3auto
secret:
diff --git a/kube/services/jobs/config-fence-job.yaml b/kube/services/jobs/config-fence-job.yaml
index 7fd655937..38be19d61 100644
--- a/kube/services/jobs/config-fence-job.yaml
+++ b/kube/services/jobs/config-fence-job.yaml
@@ -18,6 +18,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: useryaml-job
volumes:
- name: shared-data
diff --git a/kube/services/jobs/covid19-bayes-cronjob.yaml b/kube/services/jobs/covid19-bayes-cronjob.yaml
index 951668b0c..01e71bade 100644
--- a/kube/services/jobs/covid19-bayes-cronjob.yaml
+++ b/kube/services/jobs/covid19-bayes-cronjob.yaml
@@ -1,5 +1,5 @@
# gen3 job run covid19-bayes-cronjob S3_BUCKET
-apiVersion: batch/v1beta1
+apiVersion: batch/v1
kind: CronJob
metadata:
name: covid19-bayes
@@ -16,6 +16,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: s3-access-opencdn-databucket-gen3
restartPolicy: Never
nodeSelector:
diff --git a/kube/services/jobs/covid19-bayes-job.yaml b/kube/services/jobs/covid19-bayes-job.yaml
index a47ed9fc5..0afc186b9 100644
--- a/kube/services/jobs/covid19-bayes-job.yaml
+++ b/kube/services/jobs/covid19-bayes-job.yaml
@@ -9,6 +9,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: s3-access-opencdn-databucket-gen3
restartPolicy: Never
containers:
diff --git a/kube/services/jobs/covid19-etl-job.yaml b/kube/services/jobs/covid19-etl-job.yaml
index d94c24808..dd2f6571f 100644
--- a/kube/services/jobs/covid19-etl-job.yaml
+++ b/kube/services/jobs/covid19-etl-job.yaml
@@ -10,6 +10,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: s3-access-opencdn-databucket-gen3
volumes:
- name: cred-volume
diff --git a/kube/services/jobs/covid19-notebook-etl-job.yaml b/kube/services/jobs/covid19-notebook-etl-job.yaml
index 3d22b0240..e482c0505 100644
--- a/kube/services/jobs/covid19-notebook-etl-job.yaml
+++ b/kube/services/jobs/covid19-notebook-etl-job.yaml
@@ -9,6 +9,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: s3-access-opencdn-databucket-gen3
volumes:
- name: cred-volume
diff --git a/kube/services/jobs/data-ingestion-job.yaml b/kube/services/jobs/data-ingestion-job.yaml
index 9530d0c8c..797b18912 100644
--- a/kube/services/jobs/data-ingestion-job.yaml
+++ b/kube/services/jobs/data-ingestion-job.yaml
@@ -8,6 +8,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
restartPolicy: Never
volumes:
- name: shared-data
diff --git a/kube/services/jobs/distribute-licenses-job.yaml b/kube/services/jobs/distribute-licenses-job.yaml
index 8418f08e7..1c2ad4284 100644
--- a/kube/services/jobs/distribute-licenses-job.yaml
+++ b/kube/services/jobs/distribute-licenses-job.yaml
@@ -19,6 +19,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
restartPolicy: Never
serviceAccountName: hatchery-service-account
containers:
@@ -31,6 +48,11 @@ spec:
configMapKeyRef:
name: manifest-hatchery
key: "user-namespace"
+ - name: GEN3_STATA_LICENSE
+ valueFrom:
+ secretKeyRef:
+ name: stata-workspace-gen3-license-g3auto
+ key: "stata_license.txt"
command: ["python"]
args:
- "-c"
@@ -55,7 +77,7 @@ spec:
for container in pod.get('spec', {}).get('containers', []):
- if "stata-heal" in container['image']:
+ if "jupyter-pystata-gen3-licensed" in container['image']:
existing_license_id = pod.get("metadata", {}).get("annotations", {}).get("stata-license")
@@ -79,15 +101,14 @@ spec:
used_licenses.sort()
print(f"Licenses currently in use: {used_licenses}")
- # This is a free trial license for demo purposes only
- # Todo: store, mount licenses secret
- license_file = """
- 501709301583!$n1d p$53 zvqe 2sfz jzft 7aei e8yL 8ue$ j38b!snic!first line!second line!2100!
- 501709301583!$n1d p$53 zvqe 2sfz jzft 7aei e8yL 8ue$ j38b!snic!first line!second line!2100!
- 501709301583!$n1d p$53 zvqe 2sfz jzft 7aei e8yL 8ue$ j38b!snic!first line!second line!2100!
- """.strip()
+ # The Gen3 Stata license strings should be stored in a kubernetes secret using g3auto.
+ # The format of the secret is one license string per line.
+ # The license strings are generated with 'stinit' using the information in a license PDF.
+ license_secrets = os.environ['GEN3_STATA_LICENSE']
+ license_secrets = license_secrets.strip()
- licenses = license_file.split("\n")
+ licenses = license_secrets.split("\n")
+ print(f"Number of licenses = {len(licenses)}")
available_license_ids = [
license_id for license_id, license in enumerate(licenses)
if license_id not in used_licenses
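
The hard-coded trial license block is replaced by the GEN3_STATA_LICENSE environment variable, sourced from the stata-workspace-gen3-license-g3auto secret. A sketch of the expected secret layout, assuming it is created directly with kubectl and using placeholder strings rather than real license lines:

    # One Stata license string per line in stata_license.txt, the key the job reads.
    printf '%s\n' '<license-string-1>' '<license-string-2>' > stata_license.txt
    kubectl create secret generic stata-workspace-gen3-license-g3auto \
      --from-file=stata_license.txt
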
diff --git a/kube/services/jobs/ecr-access-job.yaml b/kube/services/jobs/ecr-access-job.yaml
new file mode 100644
index 000000000..89bb49d6d
--- /dev/null
+++ b/kube/services/jobs/ecr-access-job.yaml
@@ -0,0 +1,83 @@
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: ecr-access
+spec:
+ template:
+ metadata:
+ labels:
+ app: gen3job
+ spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
+ restartPolicy: Never
+ serviceAccountName: ecr-access-job-sa
+ securityContext:
+ fsGroup: 1000
+ containers:
+ - name: awshelper
+ GEN3_AWSHELPER_IMAGE|-image: quay.io/cdis/awshelper:master-|
+ imagePullPolicy: Always
+ resources:
+ limits:
+ cpu: 0.5
+ memory: 1Gi
+ env:
+ - name: SLACK_WEBHOOK
+ valueFrom:
+ configMapKeyRef:
+ name: global
+ key: slack_webhook
+ optional: true
+ - name: HOSTNAME
+ valueFrom:
+ configMapKeyRef:
+ name: global
+ key: hostname
+ - name: PAY_MODELS_DYNAMODB_TABLE
+ valueFrom:
+ configMapKeyRef:
+ name: manifest-hatchery
+ key: pay-models-dynamodb-table
+ optional: true
+ - name: ECR_ACCESS_JOB_ARN
+ valueFrom:
+ configMapKeyRef:
+ name: manifest-global
+ key: ecr-access-job-role-arn
+ optional: true
+ command: ["/bin/bash"]
+ args:
+ - "-c"
+ - |
+ cd cloud-automation/files/scripts/
+ echo Installing requirements...
+ pip3 install -r ecr-access-job-requirements.txt
+ python3 ecr-access-job.py
+ exitcode=$?
+
+ if [[ "${SLACK_WEBHOOK}" != 'None' ]]; then
+ if [[ $exitcode == 1 ]]; then
+ curl -X POST --data-urlencode "payload={\"text\": \"JOBFAIL: ECR access job on ${HOSTNAME}\"}" "${SLACK_WEBHOOK}"
+ else
+ curl -X POST --data-urlencode "payload={\"text\": \"SUCCESS: ECR access job on ${HOSTNAME}\"}" "${SLACK_WEBHOOK}"
+ fi
+ fi
+
+ echo "Exit code: $exitcode"
+ exit "$exitcode"
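
The job runs files/scripts/ecr-access-job.py and posts a SUCCESS/JOBFAIL message to Slack only when the global configmap provides a slack_webhook other than the literal string 'None'. It can presumably be launched like the other job manifests in this directory, though the exact invocation below is an assumption:

    # Hypothetical invocation; the job name mirrors the manifest filename.
    gen3 job run ecr-access
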
diff --git a/kube/services/jobs/envtest-job.yaml b/kube/services/jobs/envtest-job.yaml
index 6f2c72383..382b725ff 100644
--- a/kube/services/jobs/envtest-job.yaml
+++ b/kube/services/jobs/envtest-job.yaml
@@ -10,6 +10,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - spot
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - SPOT
restartPolicy: Never
automountServiceAccountToken: false
containers:
diff --git a/kube/services/jobs/es-garbage-job.yaml b/kube/services/jobs/es-garbage-job.yaml
index 13385f446..9d5dcf33f 100644
--- a/kube/services/jobs/es-garbage-job.yaml
+++ b/kube/services/jobs/es-garbage-job.yaml
@@ -9,6 +9,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
restartPolicy: Never
serviceAccountName: gitops-sa
securityContext:
diff --git a/kube/services/jobs/etl-cronjob.yaml b/kube/services/jobs/etl-cronjob.yaml
index f7ca5fd5b..3c3828dac 100644
--- a/kube/services/jobs/etl-cronjob.yaml
+++ b/kube/services/jobs/etl-cronjob.yaml
@@ -1,4 +1,4 @@
-apiVersion: batch/v1beta1
+apiVersion: batch/v1
kind: CronJob
metadata:
name: etl
@@ -15,6 +15,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
volumes:
- name: creds-volume
secret:
@@ -62,7 +79,7 @@ spec:
valueFrom:
configMapKeyRef:
name: global
- key: environment
+ key: hostname
volumeMounts:
- name: "creds-volume"
readOnly: true
@@ -78,8 +95,10 @@ spec:
subPath: user.yaml
resources:
limits:
- cpu: 1
+ cpu: 2
memory: 10Gi
+ requests:
+ cpu: 2
command: ["/bin/bash"]
args:
- "-c"
diff --git a/kube/services/jobs/etl-job.yaml b/kube/services/jobs/etl-job.yaml
index 8540f3902..266b0410c 100644
--- a/kube/services/jobs/etl-job.yaml
+++ b/kube/services/jobs/etl-job.yaml
@@ -2,6 +2,8 @@
apiVersion: batch/v1
kind: Job
metadata:
+ annotations:
+ karpenter.sh/do-not-evict: "true"
name: etl
spec:
backoffLimit: 0
@@ -10,6 +12,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
volumes:
- name: creds-volume
secret:
@@ -72,8 +91,10 @@ spec:
subPath: user.yaml
resources:
limits:
- cpu: 1
+ cpu: 2
memory: 10Gi
+ requests:
+ cpu: 2
command: ["/bin/bash" ]
args:
- "-c"
diff --git a/kube/services/jobs/fence-cleanup-expired-ga4gh-info-cronjob.yaml b/kube/services/jobs/fence-cleanup-expired-ga4gh-info-cronjob.yaml
index bce341aac..93eaf7652 100644
--- a/kube/services/jobs/fence-cleanup-expired-ga4gh-info-cronjob.yaml
+++ b/kube/services/jobs/fence-cleanup-expired-ga4gh-info-cronjob.yaml
@@ -1,5 +1,5 @@
---
-apiVersion: batch/v1beta1
+apiVersion: batch/v1
kind: CronJob
metadata:
name: fence-cleanup-expired-ga4gh-info
@@ -16,6 +16,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: useryaml-job
volumes:
- name: yaml-merge
diff --git a/kube/services/jobs/fence-cleanup-expired-ga4gh-info-job.yaml b/kube/services/jobs/fence-cleanup-expired-ga4gh-info-job.yaml
index bed88c308..afeaebf72 100644
--- a/kube/services/jobs/fence-cleanup-expired-ga4gh-info-job.yaml
+++ b/kube/services/jobs/fence-cleanup-expired-ga4gh-info-job.yaml
@@ -9,6 +9,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: useryaml-job
volumes:
- name: yaml-merge
diff --git a/kube/services/jobs/fence-db-migrate-job.yaml b/kube/services/jobs/fence-db-migrate-job.yaml
index f8d2a001c..53dda3e21 100644
--- a/kube/services/jobs/fence-db-migrate-job.yaml
+++ b/kube/services/jobs/fence-db-migrate-job.yaml
@@ -9,6 +9,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: useryaml-job
volumes:
- name: yaml-merge
@@ -30,11 +47,6 @@ spec:
GEN3_AWSHELPER_IMAGE|-image: quay.io/cdis/awshelper:master-|
imagePullPolicy: Always
env:
- - name: gen3Env
- valueFrom:
- configMapKeyRef:
- name: global
- key: environment
- name: JENKINS_HOME
value: ""
- name: GEN3_NOPROXY
@@ -99,11 +111,6 @@ spec:
GEN3_AWSHELPER_IMAGE|-image: quay.io/cdis/awshelper:master-|
imagePullPolicy: Always
env:
- - name: gen3Env
- valueFrom:
- configMapKeyRef:
- name: global
- key: environment
- name: JENKINS_HOME
value: ""
- name: GEN3_NOPROXY
diff --git a/kube/services/jobs/fence-delete-expired-clients-job.yaml b/kube/services/jobs/fence-delete-expired-clients-job.yaml
index bac613404..9252f6828 100644
--- a/kube/services/jobs/fence-delete-expired-clients-job.yaml
+++ b/kube/services/jobs/fence-delete-expired-clients-job.yaml
@@ -11,6 +11,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: useryaml-job
volumes:
- name: yaml-merge
diff --git a/kube/services/jobs/fence-visa-update-cronjob.yaml b/kube/services/jobs/fence-visa-update-cronjob.yaml
index 5409da672..eba842ddf 100644
--- a/kube/services/jobs/fence-visa-update-cronjob.yaml
+++ b/kube/services/jobs/fence-visa-update-cronjob.yaml
@@ -1,4 +1,4 @@
-apiVersion: batch/v1beta1
+apiVersion: batch/v1
kind: CronJob
metadata:
name: fence-visa-update
@@ -15,6 +15,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: useryaml-job
volumes:
- name: yaml-merge
@@ -56,7 +73,7 @@ spec:
valueFrom:
configMapKeyRef:
name: global
- key: environment
+ key: hostname
- name: FENCE_PUBLIC_CONFIG
valueFrom:
configMapKeyRef:
diff --git a/kube/services/jobs/fence-visa-update-job.yaml b/kube/services/jobs/fence-visa-update-job.yaml
index a34c9cff7..973ba2e3d 100644
--- a/kube/services/jobs/fence-visa-update-job.yaml
+++ b/kube/services/jobs/fence-visa-update-job.yaml
@@ -9,6 +9,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: useryaml-job
volumes:
- name: yaml-merge
@@ -50,7 +67,7 @@ spec:
valueFrom:
configMapKeyRef:
name: global
- key: environment
+ key: hostname
- name: FENCE_PUBLIC_CONFIG
valueFrom:
configMapKeyRef:
diff --git a/kube/services/jobs/fencedb-create-job.yaml b/kube/services/jobs/fencedb-create-job.yaml
index 7b3417c7e..a99c7aca3 100644
--- a/kube/services/jobs/fencedb-create-job.yaml
+++ b/kube/services/jobs/fencedb-create-job.yaml
@@ -9,6 +9,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
automountServiceAccountToken: false
volumes:
- name: creds-volume
diff --git a/kube/services/jobs/fluentd-restart-job.yaml b/kube/services/jobs/fluentd-restart-job.yaml
index 5c984b7ae..e843d9c68 100644
--- a/kube/services/jobs/fluentd-restart-job.yaml
+++ b/kube/services/jobs/fluentd-restart-job.yaml
@@ -10,6 +10,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
restartPolicy: Never
serviceAccountName: fluentd-restart
containers:
diff --git a/kube/services/jobs/gdcdb-create-job.yaml b/kube/services/jobs/gdcdb-create-job.yaml
index 2ceb333b0..1668429ad 100644
--- a/kube/services/jobs/gdcdb-create-job.yaml
+++ b/kube/services/jobs/gdcdb-create-job.yaml
@@ -9,6 +9,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
automountServiceAccountToken: false
volumes:
- name: creds-volume
diff --git a/kube/services/jobs/gen3qa-check-bucket-access-job.yaml b/kube/services/jobs/gen3qa-check-bucket-access-job.yaml
index c95516ca9..87ebc56be 100644
--- a/kube/services/jobs/gen3qa-check-bucket-access-job.yaml
+++ b/kube/services/jobs/gen3qa-check-bucket-access-job.yaml
@@ -8,6 +8,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
containers:
- name: gen3qa-check-bucket-access
GEN3_GEN3_QA_CONTROLLER_IMAGE|-image: quay.io/cdis/gen3-qa-controller:fix_gen3qa_get_check-|
diff --git a/kube/services/jobs/gentestdata-job.yaml b/kube/services/jobs/gentestdata-job.yaml
index b0c856e91..db2fcd82d 100644
--- a/kube/services/jobs/gentestdata-job.yaml
+++ b/kube/services/jobs/gentestdata-job.yaml
@@ -34,6 +34,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: useryaml-job
volumes:
- name: yaml-merge
diff --git a/kube/services/jobs/gitops-sync-job.yaml b/kube/services/jobs/gitops-sync-job.yaml
index 6044aff01..664bdf4c1 100644
--- a/kube/services/jobs/gitops-sync-job.yaml
+++ b/kube/services/jobs/gitops-sync-job.yaml
@@ -9,6 +9,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
restartPolicy: Never
serviceAccountName: gitops-sa
securityContext:
diff --git a/kube/services/jobs/google-bucket-manifest-job.yaml b/kube/services/jobs/google-bucket-manifest-job.yaml
index dcd6cd35e..619c1c03e 100644
--- a/kube/services/jobs/google-bucket-manifest-job.yaml
+++ b/kube/services/jobs/google-bucket-manifest-job.yaml
@@ -8,6 +8,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
volumes:
- name: cred-volume
secret:
diff --git a/kube/services/jobs/google-bucket-replicate-job.yaml b/kube/services/jobs/google-bucket-replicate-job.yaml
index f61a47868..7e9b2e0a7 100644
--- a/kube/services/jobs/google-bucket-replicate-job.yaml
+++ b/kube/services/jobs/google-bucket-replicate-job.yaml
@@ -12,6 +12,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
volumes:
- name: cred-volume
secret:
diff --git a/kube/services/jobs/google-create-bucket-job.yaml b/kube/services/jobs/google-create-bucket-job.yaml
index eed19dfbb..6e3f248a7 100644
--- a/kube/services/jobs/google-create-bucket-job.yaml
+++ b/kube/services/jobs/google-create-bucket-job.yaml
@@ -47,6 +47,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: useryaml-job
volumes:
- name: yaml-merge
diff --git a/kube/services/jobs/google-delete-expired-access-cronjob.yaml b/kube/services/jobs/google-delete-expired-access-cronjob.yaml
index a491865c3..2b9e4e49a 100644
--- a/kube/services/jobs/google-delete-expired-access-cronjob.yaml
+++ b/kube/services/jobs/google-delete-expired-access-cronjob.yaml
@@ -1,5 +1,5 @@
---
-apiVersion: batch/v1beta1
+apiVersion: batch/v1
kind: CronJob
metadata:
name: google-delete-expired-access
@@ -16,6 +16,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: useryaml-job
volumes:
- name: yaml-merge
diff --git a/kube/services/jobs/google-delete-expired-access-job.yaml b/kube/services/jobs/google-delete-expired-access-job.yaml
index 24e00742c..c50272254 100644
--- a/kube/services/jobs/google-delete-expired-access-job.yaml
+++ b/kube/services/jobs/google-delete-expired-access-job.yaml
@@ -9,6 +9,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: useryaml-job
volumes:
- name: yaml-merge
diff --git a/kube/services/jobs/google-delete-expired-service-account-cronjob.yaml b/kube/services/jobs/google-delete-expired-service-account-cronjob.yaml
index cbe8c049c..b40e22624 100644
--- a/kube/services/jobs/google-delete-expired-service-account-cronjob.yaml
+++ b/kube/services/jobs/google-delete-expired-service-account-cronjob.yaml
@@ -1,6 +1,6 @@
---
-# Note: change to batch/v1beta1 once we bump to k8s 1.8
-apiVersion: batch/v1beta1
+# Note: CronJob is served from the stable batch/v1 API as of Kubernetes 1.21
+apiVersion: batch/v1
kind: CronJob
metadata:
name: google-delete-expired-service-account
@@ -17,6 +17,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: useryaml-job
volumes:
- name: yaml-merge
diff --git a/kube/services/jobs/google-delete-expired-service-account-job.yaml b/kube/services/jobs/google-delete-expired-service-account-job.yaml
index 99a7f8749..04c19f9e7 100644
--- a/kube/services/jobs/google-delete-expired-service-account-job.yaml
+++ b/kube/services/jobs/google-delete-expired-service-account-job.yaml
@@ -9,6 +9,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: useryaml-job
volumes:
- name: yaml-merge
diff --git a/kube/services/jobs/google-init-proxy-groups-cronjob.yaml b/kube/services/jobs/google-init-proxy-groups-cronjob.yaml
index 2453f5009..6b4fc10aa 100644
--- a/kube/services/jobs/google-init-proxy-groups-cronjob.yaml
+++ b/kube/services/jobs/google-init-proxy-groups-cronjob.yaml
@@ -1,6 +1,6 @@
---
-# Note: change to batch/v1beta1 once we bump to k8s 1.8
-apiVersion: batch/v1beta1
+# Note: CronJob is served from the stable batch/v1 API as of Kubernetes 1.21
+apiVersion: batch/v1
kind: CronJob
metadata:
name: google-init-proxy-groups
@@ -17,6 +17,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: useryaml-job
volumes:
- name: yaml-merge
diff --git a/kube/services/jobs/google-init-proxy-groups-job.yaml b/kube/services/jobs/google-init-proxy-groups-job.yaml
index b342c7db5..3fa0eb63d 100644
--- a/kube/services/jobs/google-init-proxy-groups-job.yaml
+++ b/kube/services/jobs/google-init-proxy-groups-job.yaml
@@ -9,6 +9,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: useryaml-job
volumes:
- name: yaml-merge
diff --git a/kube/services/jobs/google-manage-account-access-cronjob.yaml b/kube/services/jobs/google-manage-account-access-cronjob.yaml
index 856c3b056..fd8bba606 100644
--- a/kube/services/jobs/google-manage-account-access-cronjob.yaml
+++ b/kube/services/jobs/google-manage-account-access-cronjob.yaml
@@ -1,6 +1,6 @@
---
-# Note: change to batch/v1beta1 once we bump to k8s 1.8
-apiVersion: batch/v1beta1
+# Note: CronJob is served from the stable batch/v1 API as of Kubernetes 1.21
+apiVersion: batch/v1
kind: CronJob
metadata:
name: google-manage-account-access
@@ -17,6 +17,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: useryaml-job
volumes:
- name: yaml-merge
diff --git a/kube/services/jobs/google-manage-account-access-job.yaml b/kube/services/jobs/google-manage-account-access-job.yaml
index 09259088c..d7f6204a0 100644
--- a/kube/services/jobs/google-manage-account-access-job.yaml
+++ b/kube/services/jobs/google-manage-account-access-job.yaml
@@ -9,6 +9,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: useryaml-job
volumes:
- name: yaml-merge
diff --git a/kube/services/jobs/google-manage-keys-cronjob.yaml b/kube/services/jobs/google-manage-keys-cronjob.yaml
index ee92611ba..eff76d30a 100644
--- a/kube/services/jobs/google-manage-keys-cronjob.yaml
+++ b/kube/services/jobs/google-manage-keys-cronjob.yaml
@@ -1,6 +1,6 @@
---
-# Note: change to batch/v1beta1 once we bump to k8s 1.8
-apiVersion: batch/v1beta1
+# Note: CronJob is served from the stable batch/v1 API as of Kubernetes 1.21
+apiVersion: batch/v1
kind: CronJob
metadata:
name: google-manage-keys
@@ -17,6 +17,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: useryaml-job
volumes:
- name: yaml-merge
diff --git a/kube/services/jobs/google-manage-keys-job.yaml b/kube/services/jobs/google-manage-keys-job.yaml
index 64773af34..84c855fb6 100644
--- a/kube/services/jobs/google-manage-keys-job.yaml
+++ b/kube/services/jobs/google-manage-keys-job.yaml
@@ -9,6 +9,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: useryaml-job
volumes:
- name: yaml-merge
diff --git a/kube/services/jobs/google-verify-bucket-access-group-cronjob.yaml b/kube/services/jobs/google-verify-bucket-access-group-cronjob.yaml
index b8bc21f88..49e83374f 100644
--- a/kube/services/jobs/google-verify-bucket-access-group-cronjob.yaml
+++ b/kube/services/jobs/google-verify-bucket-access-group-cronjob.yaml
@@ -1,6 +1,6 @@
---
-# Note: change to batch/v1beta1 once we bump to k8s 1.8
-apiVersion: batch/v1beta1
+# Note: batch/v1 CronJob requires Kubernetes 1.21+
+apiVersion: batch/v1
kind: CronJob
metadata:
name: google-verify-bucket-access-group
@@ -17,6 +17,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: useryaml-job
volumes:
- name: yaml-merge
diff --git a/kube/services/jobs/google-verify-bucket-access-group-job.yaml b/kube/services/jobs/google-verify-bucket-access-group-job.yaml
index 3f756eaa5..93eae91dc 100644
--- a/kube/services/jobs/google-verify-bucket-access-group-job.yaml
+++ b/kube/services/jobs/google-verify-bucket-access-group-job.yaml
@@ -9,6 +9,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: useryaml-job
volumes:
- name: yaml-merge
diff --git a/kube/services/jobs/graph-create-job.yaml b/kube/services/jobs/graph-create-job.yaml
index 6fd859cc2..f6595cdd2 100644
--- a/kube/services/jobs/graph-create-job.yaml
+++ b/kube/services/jobs/graph-create-job.yaml
@@ -9,6 +9,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
automountServiceAccountToken: false
volumes:
- name: creds-volume
diff --git a/kube/services/jobs/hatchery-metrics-job.yaml b/kube/services/jobs/hatchery-metrics-job.yaml
index 3a4e571f6..26f5ad973 100644
--- a/kube/services/jobs/hatchery-metrics-job.yaml
+++ b/kube/services/jobs/hatchery-metrics-job.yaml
@@ -9,6 +9,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
restartPolicy: Never
serviceAccountName: hatchery-service-account
securityContext:
diff --git a/kube/services/jobs/hatchery-reaper-job.yaml b/kube/services/jobs/hatchery-reaper-job.yaml
index 9278fb727..77d249e37 100644
--- a/kube/services/jobs/hatchery-reaper-job.yaml
+++ b/kube/services/jobs/hatchery-reaper-job.yaml
@@ -9,6 +9,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
restartPolicy: Never
serviceAccountName: hatchery-service-account
securityContext:
@@ -41,7 +58,60 @@ spec:
- |
export GEN3_HOME="$HOME/cloud-automation"
source "$GEN3_HOME/gen3/gen3setup.sh"
+ # 60 minute idle timeout max
+ limit=3600
+ namespace=$(gen3 jupyter j-namespace)
+ remote_users=$(kubectl get svc -n $namespace -o json | jq -r . | jq -r '.items[].metadata.annotations."getambassador.io/config"' | yq -r .headers.remote_user)
+
+ # helper function to construct service name
+ function escape() {
+ string="$1"
+ shift
+ safeBytes="abcdefghijklmnopqrstuvwxyz0123456789"
+ retString=""
+ while read -n 1 char ; do
+ if [[ $safeBytes == *"$char"* ]]; then
+ retString+=$char
+ else
+ hex=$(printf "%02x" "'${char}'")
+ retString+="-"$hex
+ fi
+ done <<< "$string"
+ echo $retString
+ }
+
+ for user in $remote_users; do
+ gen3_log_info "Checking possible workspaces to reap for $user"
+ status=$(curl -s -H "REMOTE_USER: $user" hatchery-service/status | jq -r .status)
+ if [ "$status" == "Running" ] || [ "$status" == "Launching" ]; then
+ gen3_log_info "$user has workspace that is $status"
+ serviceName=h-$(escape $user)-s
+ service="ambassador-service"
+ status_code=$(curl -s -w '%{http_code}' -o status.json -H "REMOTE_USER: $user" $service/api/status)
+ if [ "$status_code" == "200" ]; then
+ last_activity=$(curl -s -H "REMOTE_USER: $user" $service/api/status | jq -r .last_activity )
+ now=$(date +%s)
+ delta=$(expr $now - $(date -d "$last_activity" +%s))
+ gen3_log_info "Workspace for $user has been idle for $delta seconds"
+ if [ "$delta" -gt "$limit" ]; then
+ gen3_log_info "Workspace for $user has been running for $delta seconds, which is higher than the $limit... Terminating"
+ curl -XPOST -s -H "REMOTE_USER: $user" hatchery-service/terminate
+ fi
+ else
+ gen3_log_err "Error: Got HTTP status $status_code trying to get last_activity for $user. Not able to reap workspace"
+ fi
+ gen3_log_info "Checking if paymodel for $user is above limit"
+ pm_status=$(curl -s -H "REMOTE_USER: $user" hatchery-service/paymodels | jq -r .request_status)
+ if [ "$pm_status" == "above limit" ]; then
+ gen3_log_warn "Paymodel status is above limit for user: $user. Reaping the workspace"
+ curl -XPOST -s -H "REMOTE_USER: $user" hatchery-service/terminate
+ fi
+ fi
+ done
+
+ # legacy reaper code
+ gen3_log_info "Running legacy reaper job (based on local cluster/ prometheus)"
if appList="$(gen3 jupyter idle none "$(gen3 db namespace)" kill)" && [[ -n "$appList" && -n "$slackWebHook" && "$slackWebHook" != "None" ]]; then
curl -X POST --data-urlencode "payload={\"text\": \"hatchery-reaper in $gen3Hostname: \n\`\`\`\n${appList}\n\`\`\`\"}" "${slackWebHook}"
fi
- echo "All Done!"
+ gen3_log_info "All Done!"
\ No newline at end of file
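For reference, the escape() helper above builds a DNS-safe service name from a username by keeping lowercase letters and digits and hex-encoding every other byte with a leading dash. A minimal standalone sketch of the same logic (the sample username is hypothetical):

    escape() {
      local string="$1" safeBytes="abcdefghijklmnopqrstuvwxyz0123456789" retString="" char
      while read -r -n 1 char; do
        if [[ -n "$char" && "$safeBytes" == *"$char"* ]]; then
          retString+="$char"
        elif [[ -n "$char" ]]; then
          retString+="-$(printf '%02x' "'$char")"   # hex-encode the unsafe byte
        fi
      done <<< "$string"
      echo "$retString"
    }
    escape "jane.doe@example.org"   # prints: jane-2edoe-40example-2eorg

So that user's workspace service would be named h-jane-2edoe-40example-2eorg-s, matching the serviceName pattern computed in the job.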
diff --git a/kube/services/jobs/healthcheck-cronjob.yaml b/kube/services/jobs/healthcheck-cronjob.yaml
index 25888f32c..1ca71fc8d 100644
--- a/kube/services/jobs/healthcheck-cronjob.yaml
+++ b/kube/services/jobs/healthcheck-cronjob.yaml
@@ -1,4 +1,4 @@
-apiVersion: batch/v1beta1
+apiVersion: batch/v1
kind: CronJob
metadata:
name: healthcheck
@@ -15,6 +15,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
restartPolicy: Never
serviceAccountName: jenkins-service
containers:
diff --git a/kube/services/jobs/indexd-authz-job.yaml b/kube/services/jobs/indexd-authz-job.yaml
index a3fbb8658..8b041740e 100644
--- a/kube/services/jobs/indexd-authz-job.yaml
+++ b/kube/services/jobs/indexd-authz-job.yaml
@@ -8,6 +8,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
automountServiceAccountToken: false
volumes:
- name: config-volume
diff --git a/kube/services/jobs/indexd-userdb-job.yaml b/kube/services/jobs/indexd-userdb-job.yaml
index e018f7a34..676307481 100644
--- a/kube/services/jobs/indexd-userdb-job.yaml
+++ b/kube/services/jobs/indexd-userdb-job.yaml
@@ -16,6 +16,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
automountServiceAccountToken: false
volumes:
- name: config-volume
diff --git a/kube/services/jobs/metadata-aggregate-sync-job.yaml b/kube/services/jobs/metadata-aggregate-sync-job.yaml
index e4f6761f7..7f4043753 100644
--- a/kube/services/jobs/metadata-aggregate-sync-job.yaml
+++ b/kube/services/jobs/metadata-aggregate-sync-job.yaml
@@ -8,6 +8,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
volumes:
- name: config-volume-g3auto
secret:
@@ -20,7 +37,9 @@ spec:
configMap:
name: manifest-metadata
optional: true
- containers:
+ - name: shared-data
+ emptyDir: {}
+ initContainers:
- name: metadata
GEN3_METADATA_IMAGE
volumeMounts:
@@ -36,6 +55,8 @@ spec:
readOnly: true
mountPath: /metadata.json
subPath: json
+ - name: shared-data
+ mountPath: /mnt/shared
env:
- name: GEN3_DEBUG
GEN3_DEBUG_FLAG|-value: "False"-|
@@ -53,10 +74,57 @@ spec:
name: manifest-metadata
key: AGG_MDS_NAMESPACE
optional: true
+ - name: AGG_MDS_DEFAULT_DATA_DICT_FIELD
+ valueFrom:
+ configMapKeyRef:
+ name: manifest-metadata
+ key: AGG_MDS_DEFAULT_DATA_DICT_FIELD
+ optional: true
imagePullPolicy: Always
command: ["/bin/sh"]
args:
- "-c"
- |
/env/bin/python /src/src/mds/populate.py --config /aggregate_config.json
+ if [ $? -ne 0 ]; then
+ echo "WARNING: non zero exit code: $?"
+ else
+ touch /mnt/shared/success
+ fi
+ containers:
+ - name: awshelper
+ env:
+ - name: slackWebHook
+ valueFrom:
+ configMapKeyRef:
+ name: global
+ key: slack_webhook
+ - name: gen3Env
+ valueFrom:
+ configMapKeyRef:
+ name: manifest-global
+ key: hostname
+ GEN3_AWSHELPER_IMAGE|-image: quay.io/cdis/awshelper:master-|
+ volumeMounts:
+ - name: shared-data
+ mountPath: /mnt/shared
+ command: ["/bin/bash"]
+ args:
+ - "-c"
+ - |
+ if [[ ! "$slackWebHook" =~ ^http ]]; then
+ echo "Slack webhook not set"
+ exit 0
+ fi
+ if ! [ -f /mnt/shared/success ]; then
+ success="FAILED"
+ color="ff0000"
+ else
+ success="SUCCESS"
+ color="2EB67D"
+ fi
+ echo "Sending ${success} message to slack..."
+ payload="{\"attachments\": [{\"fallback\": \"JOB ${success}: metadata-aggregate-sync cronjob on ${gen3Env}\",\"color\": \"#${color}\",\"title\": \"JOB ${success}: metadata-aggregate-sync cronjob on ${gen3Env}\",\"text\": \"Pod name: ${HOSTNAME}\",\"ts\": \"$(date +%s)\"}]}"
+ echo "Payload=${payload}"
+ curl -X POST --data-urlencode "payload=${payload}" "${slackWebHook}"
restartPolicy: Never
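The new initContainers/containers split above uses a shared emptyDir as a success flag: the metadata init container touches /mnt/shared/success only when populate.py exits cleanly, and the awshelper container turns that into a green or red Slack attachment. A quick way to sanity-check the payload shape locally (the hostname and values below are placeholders, not taken from any environment):

    gen3Env="data.example.org"      # placeholder hostname
    success="SUCCESS"; color="2EB67D"
    payload="{\"attachments\": [{\"fallback\": \"JOB ${success}: metadata-aggregate-sync cronjob on ${gen3Env}\",\"color\": \"#${color}\",\"title\": \"JOB ${success}: metadata-aggregate-sync cronjob on ${gen3Env}\",\"text\": \"Pod name: ${HOSTNAME}\",\"ts\": \"$(date +%s)\"}]}"
    echo "$payload" | jq .          # confirm the JSON parses before pointing it at a real webhook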
diff --git a/kube/services/jobs/metadata-delete-expired-objects-job.yaml b/kube/services/jobs/metadata-delete-expired-objects-job.yaml
new file mode 100644
index 000000000..221b964a0
--- /dev/null
+++ b/kube/services/jobs/metadata-delete-expired-objects-job.yaml
@@ -0,0 +1,33 @@
+# Delete all expired MDS objects.
+#
+# Run `gen3 kube-setup-metadata-delete-expired-objects-job` to configure this job
+# and set it up as a cronjob.
+#
+# Add the job image to the manifest:
+# `"metadata-delete-expired-objects": "quay.io/cdis/metadata-delete-expired-objects:master"`
+#
+# Once set up, the job can be run with `gen3 job run metadata-delete-expired-objects-job`.
+
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: metadata-delete-expired-objects
+spec:
+ template:
+ metadata:
+ labels:
+ app: gen3job
+ spec:
+ volumes:
+ - name: config-volume
+ secret:
+ secretName: "metadata-delete-expired-objects-g3auto"
+ containers:
+ - name: metadata-delete-expired-objects
+ GEN3_METADATA-DELETE-EXPIRED-OBJECTS_IMAGE
+ imagePullPolicy: Always
+ volumeMounts:
+ - name: config-volume
+ readOnly: true
+ mountPath: /mnt
+ restartPolicy: Never
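Putting the header comments together, a typical rollout might look like the sketch below (the gen3 commands follow the comments above; the kubectl label selector is an assumption for inspecting the finished pod):

    gen3 kube-setup-metadata-delete-expired-objects-job   # configure secrets / cronjob wiring
    # add the image to the environment's manifest, e.g.:
    #   "metadata-delete-expired-objects": "quay.io/cdis/metadata-delete-expired-objects:master"
    gen3 job run metadata-delete-expired-objects-job      # one-off run
    kubectl logs -l job-name=metadata-delete-expired-objects   # inspect the run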
diff --git a/kube/services/jobs/opencost-report-argo-job.yaml b/kube/services/jobs/opencost-report-argo-job.yaml
index 0f31eca40..788bd1dec 100644
--- a/kube/services/jobs/opencost-report-argo-job.yaml
+++ b/kube/services/jobs/opencost-report-argo-job.yaml
@@ -5,16 +5,19 @@
# OPENCOST_URL $OPENCOST_URL \
#
# BUCKET_NAME(required)
-# Name of the bucket to upload the generated reports to.
+# Name of the bucket to upload the generated reports to.
# Make sure that there is a service account called "reports-service-account" with access to this bucket.
#
# OPENCOST_URL(optional)
# URL to query OpenCost API's. Default is https://kubecost-cost-analyzer.kubecost
-#
+#
+# CHANNEL(optional)
+# The Slack channel ID that the alert will be sent to. The easiest way to find it is to open Slack in a browser,
+# navigate to the channel, and copy the ID at the end of the URL (it begins with a C).
#
# Example
# gen3 job run opencost-report-argo BUCKET_NAME opencost-report-bucket
-#
+#
# Cronjob Example
# gen3 job cron opencost-report-argo @daily BUCKET_NAME opencost-report-bucket
apiVersion: batch/v1
@@ -27,6 +30,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: reports-service-account
containers:
- name: send-report
@@ -42,14 +62,30 @@ spec:
key: environment
- name: BUCKET_NAME
GEN3_BUCKET_NAME|-value: ""-|
+ - name: slackWebHook
+ valueFrom:
+ configMapKeyRef:
+ name: global
+ key: slack_webhook
+ optional: true
+ - name: channel
+ GEN3_CHANNEL|-value: ""-|
command: [ "/bin/bash" ]
args:
- "-c"
- |
- proto-opencost-reporter GetAllocationReport \
- --from_days_before 9 \
+ proto-opencost-reporter GetAllocationReport \
+ --from_days_before 2 \
--to_days_before 1 \
--aggregate_by label:gen3username label:workflows.argoproj.io/workflow \
--filter_namespaces argo \
--share_idle_by_node
+ rc=$?
+ if [[ "${slackWebHook}" != 'None' ]]; then
+ if [ $rc != 0 ]; then
+ curl -X POST --data-urlencode "payload={\"text\": \"OPENCOST-REPORT-JOB-FAILED: Opencost report job failed to create a report\", \"channel\": \"${channel}\", \"username\": \"opencost-report-job\"}" "${slackWebHook}";
+ else
+ curl -X POST --data-urlencode "payload={\"text\": \"OPENCOST-REPORT-JOB-SUCCEEDED: Opencost report job created report\", \"channel\": \"${channel}\", \"username\": \"opencost-report-job\"}" "${slackWebHook}"
+ fi
+ fi
restartPolicy: Never
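With the new optional CHANNEL parameter, the invocation examples in the header extend naturally; a hedged sketch (bucket name and channel ID are placeholders):

    gen3 job run opencost-report-argo BUCKET_NAME opencost-report-bucket CHANNEL C0123ABCD
    gen3 job cron opencost-report-argo @daily BUCKET_NAME opencost-report-bucket CHANNEL C0123ABCD

If slack_webhook is set in the global configmap, the job posts a success or failure message to that channel after attempting the report.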
diff --git a/kube/services/jobs/psql-db-dump-va-testing-job.yaml b/kube/services/jobs/psql-db-dump-va-testing-job.yaml
new file mode 100644
index 000000000..8a8037e16
--- /dev/null
+++ b/kube/services/jobs/psql-db-dump-va-testing-job.yaml
@@ -0,0 +1,80 @@
+---
+# NOTE: This job was created specifically to dump all the databases in va-testing, in preparation for a move to a second cluster.
+# If you aren't doing that, this is probably not the job you're looking for.
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: psql-db-dump-va-testing
+spec:
+ template:
+ metadata:
+ labels:
+ app: gen3job
+ spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
+ serviceAccountName: dbbackup-sa
+ containers:
+ - name: pgdump
+ image: quay.io/cdis/awshelper:master
+ imagePullPolicy: Always
+ env:
+ - name: gen3Env
+ valueFrom:
+ configMapKeyRef:
+ name: global
+ key: environment
+ - name: JENKINS_HOME
+ value: "devterm"
+ - name: GEN3_HOME
+ value: /home/ubuntu/cloud-automation
+ command: ["/bin/bash"]
+ args:
+ - "-c"
+ - |
+ source "${GEN3_HOME}/gen3/lib/utils.sh"
+ gen3_load "gen3/gen3setup"
+ account_id=$(aws sts get-caller-identity --query "Account" --output text)
+ default_bucket_name="gen3-db-backups-${account_id}"
+ default_databases=("fence" "indexd" "sheepdog" "peregrine" "arborist" "argo" "atlas" "metadata" "ohdsi" "omop-data" "wts")
+ s3_dir="va-testing-$(date +"%Y-%m-%d-%H-%M-%S")"
+ databases=("${default_databases[@]}")
+ bucket_name=$default_bucket_name
+
+ for database in "${databases[@]}"; do
+ gen3_log_info "Starting database backup for ${database}"
+ gen3 db backup "${database}" > "${database}.sql"
+
+ if [ $? -eq 0 ] && [ -f "${database}.sql" ]; then
+ gen3_log_info "Uploading backup file ${database}.sql to s3://${bucket_name}/${s3_dir}/${database}.sql"
+ aws s3 cp "${database}.sql" "s3://${bucket_name}/${s3_dir}/${database}.sql"
+
+ if [ $? -eq 0 ]; then
+ gen3_log_info "Successfully uploaded ${database}.sql to S3"
+ else
+ gen3_log_err "Failed to upload ${database}.sql to S3"
+ fi
+ gen3_log_info "Deleting temporary backup file ${database}.sql"
+ rm -f "${database}.sql"
+ else
+ gen3_log_err "Backup operation failed for ${database}"
+ rm -f "${database}.sql"
+ fi
+ done
+ sleep 600
+ restartPolicy: Never
diff --git a/kube/services/jobs/psql-db-prep-dump-job.yaml b/kube/services/jobs/psql-db-prep-dump-job.yaml
new file mode 100644
index 000000000..86c513b78
--- /dev/null
+++ b/kube/services/jobs/psql-db-prep-dump-job.yaml
@@ -0,0 +1,79 @@
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: psql-db-prep-dump
+spec:
+ template:
+ metadata:
+ labels:
+ app: gen3job
+ spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
+ serviceAccountName: dbbackup-sa
+ containers:
+ - name: pgdump
+ image: quay.io/cdis/awshelper:master
+ imagePullPolicy: Always
+ env:
+ - name: gen3Env
+ valueFrom:
+ configMapKeyRef:
+ name: global
+ key: environment
+ - name: JENKINS_HOME
+ value: "devterm"
+ - name: GEN3_HOME
+ value: /home/ubuntu/cloud-automation
+ command: [ "/bin/bash" ]
+ args:
+ - "-c"
+ - |
+ source "${GEN3_HOME}/gen3/lib/utils.sh"
+ gen3_load "gen3/gen3setup"
+ account_id=$(aws sts get-caller-identity --query "Account" --output text)
+ default_bucket_name="gen3-db-backups-${account_id}"
+ default_databases=("indexd" "sheepdog" "metadata")
+ s3_dir="$(date +"%Y-%m-%d-%H-%M-%S")"
+ databases=("${default_databases[@]}")
+ bucket_name=$default_bucket_name
+
+ for database in "${databases[@]}"; do
+ gen3_log_info "Starting database backup for ${database}"
+ gen3 db backup "${database}" > "${database}.sql"
+
+ if [ $? -eq 0 ] && [ -f "${database}.sql" ]; then
+ gen3_log_info "Uploading backup file ${database}.sql to s3://${bucket_name}/${s3_dir}/${database}.sql"
+ aws s3 cp "${database}.sql" "s3://${bucket_name}/${s3_dir}/${database}.sql"
+
+ if [ $? -eq 0 ]; then
+ gen3_log_info "Successfully uploaded ${database}.sql to S3"
+ else
+ gen3_log_err "Failed to upload ${database}.sql to S3"
+ fi
+ gen3_log_info "Deleting temporary backup file ${database}.sql"
+ rm -f "${database}.sql"
+ else
+ gen3_log_err "Backup operation failed for ${database}"
+ rm -f "${database}.sql"
+ fi
+ done
+ sleep 600
+ restartPolicy: Never
+
diff --git a/kube/services/jobs/psql-db-prep-restore-job.yaml b/kube/services/jobs/psql-db-prep-restore-job.yaml
new file mode 100644
index 000000000..710e6f4f1
--- /dev/null
+++ b/kube/services/jobs/psql-db-prep-restore-job.yaml
@@ -0,0 +1,90 @@
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: psql-db-prep-restore
+spec:
+ template:
+ metadata:
+ labels:
+ app: gen3job
+ spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
+ serviceAccountName: dbbackup-sa
+ containers:
+ - name: pgrestore
+ image: quay.io/cdis/awshelper:master
+ imagePullPolicy: Always
+ env:
+ - name: gen3Env
+ valueFrom:
+ configMapKeyRef:
+ name: global
+ key: environment
+ - name: JENKINS_HOME
+ value: "devterm"
+ - name: GEN3_HOME
+ value: /home/ubuntu/cloud-automation
+ command: [ "/bin/bash" ]
+ args:
+ - "-c"
+ - |
+ source "${GEN3_HOME}/gen3/lib/utils.sh"
+ gen3_load "gen3/gen3setup"
+ account_id=$(aws sts get-caller-identity --query "Account" --output text)
+ default_bucket_name="gen3-db-backups-${account_id}"
+ default_databases=("indexd" "sheepdog" "metadata")
+ backup_directories=$(aws s3 ls "s3://${default_bucket_name}/")
+ newest_directory=$(echo "$backup_directories" | awk '/PRE/ {if ($2 > max) max = $2} END {print max}')
+ databases=("${default_databases[@]}")
+ bucket_name=$default_bucket_name
+ namespace=$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace)
+ date_str=$(date -u +%y%m%d_%H%M%S)
+ gen3_log_info "Database backup location in S3: ${bucket_name}/${newest_directory}"
+ gen3_log_info "namespace: $namespace \n\n"
+
+ for database in "${databases[@]}"; do
+ gen3_log_info "Downloading database backup file s3://${default_bucket_name}/${newest_directory}${database}.sql"
+ aws s3 cp "s3://${default_bucket_name}/${newest_directory}${database}.sql" "${database}.sql"
+ server=$(gen3 db creds "$database" | jq -r '.g3FarmServer')
+ username=$(gen3 db creds "$database" | jq -r '.db_username')
+ db_name="${namespace}_${database}_${date_str}"
+ if [[ -z "$server" || -z "$username" ]]; then
+ gen3_log_info "Error: Unable to extract server name or username."
+ return 1
+ fi
+ gen3 psql $database -c "create database $db_name;" 2>&1 | grep -q "permission denied"
+ if [ $? -eq 0 ]; then
+ gen3_log_info "User does not have permission to create database. Granting required permission..."
+ gen3 psql $server -c "alter user $username createdb;"
+ gen3 psql $database -c "create database $db_name;"
+ if [ $? -eq 0 ]; then
+ gen3_log_info "Database $db_name created successfully!"
+ else
+ gen3_log_info "Error creating database $db_name after granting permission."
+ fi
+ else
+ gen3_log_info "Database $db_name created successfully!"
+ fi
+ gen3_log_info "Starting database restore for ${database} to database $db_name"
+ gen3 psql "$database" -d "$db_name" -f "${database}.sql" 1>&2
+ gen3_log_info "cleanup temporary backup file ${database}.sql \n\n\n"
+ done
+ sleep 600
+ restartPolicy: Never
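The restore job picks the most recent backup prefix by comparing the timestamped directory names as strings, which works because the names are zero-padded; a quick demonstration with made-up `aws s3 ls` output:

    backup_directories=$'                           PRE 2024-01-02-03-04-05/\n                           PRE 2024-03-04-05-06-07/'
    newest_directory=$(echo "$backup_directories" | awk '/PRE/ {if ($2 > max) max = $2} END {print max}')
    echo "$newest_directory"   # -> 2024-03-04-05-06-07/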
diff --git a/kube/services/jobs/psql-fix-job.yaml b/kube/services/jobs/psql-fix-job.yaml
index 20f453c2a..40fa74b96 100644
--- a/kube/services/jobs/psql-fix-job.yaml
+++ b/kube/services/jobs/psql-fix-job.yaml
@@ -9,6 +9,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: jenkins-service
containers:
- name: fix
diff --git a/kube/services/jobs/remove-objects-from-clouds-job.yaml b/kube/services/jobs/remove-objects-from-clouds-job.yaml
index 46aa3d43f..b839b24e7 100644
--- a/kube/services/jobs/remove-objects-from-clouds-job.yaml
+++ b/kube/services/jobs/remove-objects-from-clouds-job.yaml
@@ -11,6 +11,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
volumes:
- name: cred-volume
secret:
diff --git a/kube/services/jobs/replicate-validation-job.yaml b/kube/services/jobs/replicate-validation-job.yaml
index 13f767d69..d64cfcc13 100644
--- a/kube/services/jobs/replicate-validation-job.yaml
+++ b/kube/services/jobs/replicate-validation-job.yaml
@@ -11,6 +11,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
volumes:
- name: aws-cred-volume
secret:
diff --git a/kube/services/jobs/s3sync-cronjob.yaml b/kube/services/jobs/s3sync-cronjob.yaml
index 14053492f..69d66ec3f 100644
--- a/kube/services/jobs/s3sync-cronjob.yaml
+++ b/kube/services/jobs/s3sync-cronjob.yaml
@@ -5,7 +5,7 @@
#####REQUIRED VARIABLE########
#SOURCE_BUCKET
#TARGET_BUCKET
-apiVersion: batch/v1beta1
+apiVersion: batch/v1
kind: CronJob
metadata:
name: s3sync
@@ -21,6 +21,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
volumes:
- name: cred-volume
secret:
@@ -40,7 +57,7 @@ spec:
valueFrom:
configMapKeyRef:
name: global
- key: environment
+ key: hostname
- name: SOURCE_BUCKET
GEN3_SOURCE_BUCKET
- name: TARGET_BUCKET
diff --git a/kube/services/jobs/usersync-job.yaml b/kube/services/jobs/usersync-job.yaml
index 915f1a588..8a5471a20 100644
--- a/kube/services/jobs/usersync-job.yaml
+++ b/kube/services/jobs/usersync-job.yaml
@@ -31,6 +31,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: useryaml-job
volumes:
- name: yaml-merge
@@ -75,7 +92,7 @@ spec:
configMap:
name: "projects"
containers:
- - name: fence
+ - name: usersync
GEN3_FENCE_IMAGE
imagePullPolicy: Always
env:
@@ -108,7 +125,7 @@ spec:
valueFrom:
configMapKeyRef:
name: global
- key: environment
+ key: hostname
- name: FENCE_PUBLIC_CONFIG
valueFrom:
configMapKeyRef:
@@ -243,7 +260,7 @@ spec:
exit 1
fi
#-----------------
- echo "awshelper downloading ${userYamlS3Path} to /mnt/shared/useryaml";
+ echo "awshelper downloading ${userYamlS3Path} to /mnt/shared/user.yaml";
n=0
until [ $n -ge 5 ]; do
echo "Download attempt $n"
@@ -277,7 +294,7 @@ spec:
valueFrom:
configMapKeyRef:
name: global
- key: environment
+ key: hostname
- name: slackWebHook
valueFrom:
configMapKeyRef:
diff --git a/kube/services/jobs/useryaml-job.yaml b/kube/services/jobs/useryaml-job.yaml
index bf3812951..5853a05c4 100644
--- a/kube/services/jobs/useryaml-job.yaml
+++ b/kube/services/jobs/useryaml-job.yaml
@@ -9,6 +9,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
automountServiceAccountToken: false
volumes:
- name: yaml-merge
diff --git a/kube/services/jupyterhub/jupyterhub-deploy.yaml b/kube/services/jupyterhub/jupyterhub-deploy.yaml
index b2b96ff75..38b2cd41d 100644
--- a/kube/services/jupyterhub/jupyterhub-deploy.yaml
+++ b/kube/services/jupyterhub/jupyterhub-deploy.yaml
@@ -18,6 +18,23 @@ spec:
userhelper: "yes"
GEN3_DATE_LABEL
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: jupyter-service
volumes:
- name: config-volume
diff --git a/kube/services/karpenter-reconciler/application.yaml b/kube/services/karpenter-reconciler/application.yaml
new file mode 100644
index 000000000..fb0fab871
--- /dev/null
+++ b/kube/services/karpenter-reconciler/application.yaml
@@ -0,0 +1,22 @@
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+ name: karpenter-reconciler-application
+ namespace: argocd
+spec:
+ destination:
+ namespace: kube-system
+ server: https://kubernetes.default.svc
+ project: default
+ source:
+ repoURL: https://github.com/uc-cdis/cloud-automation.git
+ targetRevision: master
+ path: kube/services/karpenter-reconciler
+ directory:
+ exclude: "application.yaml"
+ syncPolicy:
+ automated:
+ prune: true
+ selfHeal: true
+ syncOptions:
+ - CreateNamespace=true
diff --git a/kube/services/karpenter-reconciler/auth.yaml b/kube/services/karpenter-reconciler/auth.yaml
new file mode 100644
index 000000000..c159028ab
--- /dev/null
+++ b/kube/services/karpenter-reconciler/auth.yaml
@@ -0,0 +1,44 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: karpenter-reconciler
+ namespace: argo-events
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: karpenter-admin-binding-reconciler
+subjects:
+ - kind: ServiceAccount
+ name: karpenter-reconciler
+ namespace: argo-events
+roleRef:
+ kind: ClusterRole
+ name: karpenter-admin
+ apiGroup: rbac.authorization.k8s.io
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: workflow-viewer-reconciler
+subjects:
+ - kind: ServiceAccount
+ name: karpenter-reconciler
+ namespace: argo-events
+roleRef:
+ kind: ClusterRole
+ name: argo-argo-workflows-view
+ apiGroup: rbac.authorization.k8s.io
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: viewer-reconciler
+subjects:
+ - kind: ServiceAccount
+ name: karpenter-reconciler
+ namespace: argo-events
+roleRef:
+ kind: ClusterRole
+ name: system:aggregate-to-view
+ apiGroup: rbac.authorization.k8s.io
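To confirm the bindings grant the reconciler what it needs (read workflows in argo, manage Karpenter resources), a hedged spot-check with kubectl:

    kubectl auth can-i list workflows.argoproj.io -n argo \
      --as=system:serviceaccount:argo-events:karpenter-reconciler
    kubectl auth can-i create provisioners.karpenter.sh \
      --as=system:serviceaccount:argo-events:karpenter-reconciler

Both should answer yes once the karpenter-admin and argo-argo-workflows-view ClusterRoles exist in the cluster.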
diff --git a/kube/services/karpenter-reconciler/karpenter-reconciler-cronjob-va-testing.yaml b/kube/services/karpenter-reconciler/karpenter-reconciler-cronjob-va-testing.yaml
new file mode 100644
index 000000000..aaba57b07
--- /dev/null
+++ b/kube/services/karpenter-reconciler/karpenter-reconciler-cronjob-va-testing.yaml
@@ -0,0 +1,71 @@
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+ name: karpenter-reconciler-cronjob-va-testing
+ namespace: argo-events
+spec:
+ schedule: "*/5 * * * *"
+ jobTemplate:
+ spec:
+ template:
+ metadata:
+ labels:
+ app: gen3job
+ spec:
+ serviceAccount: karpenter-reconciler
+ volumes:
+ - name: karpenter-templates-volume
+ configMap:
+ name: karpenter-templates
+ containers:
+ - name: karpenter-reconciler
+ image: quay.io/cdis/awshelper
+ volumeMounts:
+ - name: karpenter-templates-volume
+ mountPath: /manifests
+ env:
+ - name: PROVISIONER_TEMPLATE
+ value: /manifests/provisioner.yaml
+ - name: AWSNODETEMPLATE_TEMPLATE
+ value: /manifests/nodetemplate.yaml
+ command: ["/bin/bash"]
+ args:
+ - "-c"
+ - |
+ #!/bin/bash
+ if [ -z "$PROVISIONER_TEMPLATE" ]; then
+ PROVISIONER_TEMPLATE="provisioner.yaml"
+ fi
+
+ if [ -z "$AWSNODETEMPLATE_TEMPLATE" ]; then
+ AWSNODETEMPLATE_TEMPLATE="nodetemplate.yaml"
+ fi
+
+ ENVIRONMENT=$(kubectl -n va-testing get configmap global -o jsonpath="{.data.environment}")
+
+ WORKFLOWS=$(kubectl get workflows -n argo -o=jsonpath='{range .items[*]}{.metadata.name}{" "}{.metadata.labels.gen3username}{"\n"}{end}')
+
+ WORKFLOW_ARRAY=()
+
+ while IFS= read -r line; do
+ WORKFLOW_ARRAY+=("$line")
+ done <<< "$WORKFLOWS"
+
+ for workflow in "${WORKFLOW_ARRAY[@]}"
+ do
+ echo "Running loop for workflow: $workflow"
+ workflow_name=$(echo "$workflow" | awk '{print $1}')
+ workflow_user=$(echo "$workflow" | awk '{print $2}')
+
+ if ! kubectl get awsnodetemplate workflow-$workflow_name >/dev/null 2>&1; then
+ echo "No awsnodetemplate found for ${workflow_name}, creating one"
+ sed -e "s/WORKFLOW_NAME/$workflow_name/" -e "s/GEN3_USERNAME/$workflow_user/" -e "s/ENVIRONMENT/$ENVIRONMENT/" "$AWSNODETEMPLATE_TEMPLATE" | kubectl apply -f -
+ fi
+
+ if ! kubectl get provisioner workflow-$workflow_name >/dev/null 2>&1; then
+ echo "No provisioner found for ${workflow_name}, creating one"
+ sed -e "s/WORKFLOW_NAME/$workflow_name/" -e "s/GEN3_USERNAME/$workflow_user/" -e "s/ENVIRONMENT/$ENVIRONMENT/" "$PROVISIONER_TEMPLATE" | kubectl apply -f -
+
+ fi
+ done
+ restartPolicy: OnFailure
diff --git a/kube/services/karpenter-reconciler/karpenter-reconciler-cronjob.yaml b/kube/services/karpenter-reconciler/karpenter-reconciler-cronjob.yaml
new file mode 100644
index 000000000..aef5d6c49
--- /dev/null
+++ b/kube/services/karpenter-reconciler/karpenter-reconciler-cronjob.yaml
@@ -0,0 +1,74 @@
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+ name: karpenter-reconciler-cronjob
+ namespace: argo-events
+spec:
+ schedule: "*/5 * * * *"
+ jobTemplate:
+ spec:
+ template:
+ metadata:
+ labels:
+ app: gen3job
+ spec:
+ serviceAccount: karpenter-reconciler
+ volumes:
+ - name: karpenter-templates-volume
+ configMap:
+ name: karpenter-templates
+ containers:
+ - name: karpenter-reconciler
+ image: quay.io/cdis/awshelper
+ volumeMounts:
+ - name: karpenter-templates-volume
+ mountPath: /manifests
+ env:
+ - name: PROVISIONER_TEMPLATE
+ value: /manifests/provisioner.yaml
+ - name: AWSNODETEMPLATE_TEMPLATE
+ value: /manifests/nodetemplate.yaml
+ command: ["/bin/bash"]
+ args:
+ - "-c"
+ - |
+ #!/bin/bash
+ if [ -z "$PROVISIONER_TEMPLATE" ]; then
+ PROVISIONER_TEMPLATE="provisioner.yaml"
+ fi
+
+ if [ -z "$AWSNODETEMPLATE_TEMPLATE" ]; then
+ AWSNODETEMPLATE_TEMPLATE="nodetemplate.yaml"
+ fi
+
+ ENVIRONMENT=$(kubectl -n default get configmap global -o jsonpath="{.data.environment}")
+
+ WORKFLOWS=$(kubectl get workflows -n argo -o=jsonpath='{range .items[*]}{.metadata.name}{" "}{.metadata.labels.gen3username}{"\n"}{end}')
+
+ WORKFLOW_ARRAY=()
+
+ while IFS= read -r line; do
+ WORKFLOW_ARRAY+=("$line")
+ done <<< "$WORKFLOWS"
+
+ echo $WORKFLOWS
+
+ for workflow in "${WORKFLOW_ARRAY[@]}"
+ do
+ workflow_name=$(echo "$workflow" | awk '{print $1}')
+ workflow_user=$(echo "$workflow" | awk '{print $2}')
+
+ if [ ! -z "$workflow_name" ]; then
+ if ! kubectl get awsnodetemplate workflow-$workflow_name >/dev/null 2>&1; then
+ echo "No awsnodetemplate found for ${workflow_name}, creating one"
+ sed -e "s/WORKFLOW_NAME/$workflow_name/" -e "s/GEN3_USERNAME/$workflow_user/" -e "s/ENVIRONMENT/$ENVIRONMENT/" "$AWSNODETEMPLATE_TEMPLATE" | kubectl apply -f -
+ fi
+
+ if ! kubectl get provisioner workflow-$workflow_name >/dev/null 2>&1; then
+ echo "No provisioner found for ${workflow_name}, creating one"
+ sed -e "s/WORKFLOW_NAME/$workflow_name/" -e "s/GEN3_USERNAME/$workflow_user/" -e "s/ENVIRONMENT/$ENVIRONMENT/" "$PROVISIONER_TEMPLATE" | kubectl apply -f -
+
+ fi
+ fi
+ done
+ restartPolicy: OnFailure
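The reconciler's core step is plain sed templating of the mounted manifests, one AWSNodeTemplate and one Provisioner per Argo workflow. A dry-run sketch of that step with illustrative values:

    workflow_name="wf-abc123"            # illustrative
    workflow_user="user-example"         # illustrative
    ENVIRONMENT="myenv"                  # illustrative
    sed -e "s/WORKFLOW_NAME/$workflow_name/" \
        -e "s/GEN3_USERNAME/$workflow_user/" \
        -e "s/ENVIRONMENT/$ENVIRONMENT/" \
        /manifests/provisioner.yaml | kubectl apply --dry-run=client -f -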
diff --git a/kube/services/karpenter/binfmt.yaml b/kube/services/karpenter/binfmt.yaml
new file mode 100644
index 000000000..35cf5b559
--- /dev/null
+++ b/kube/services/karpenter/binfmt.yaml
@@ -0,0 +1,42 @@
+# Run binfmt setup on any new node
+# https://kubernetes.io/docs/concepts/workloads/controllers/daemonset
+# https://github.com/docker/buildx/issues/342#issuecomment-680715762
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: binfmt
+ # namespace: kube-system
+ labels:
+ app: binfmt-setup
+spec:
+ selector:
+ matchLabels:
+ name: binfmt
+ # https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates
+ template:
+ metadata:
+ labels:
+ name: binfmt
+ spec:
+ nodeSelector:
+ kubernetes.io/arch: "arm64"
+ initContainers:
+ - name: binfmt
+ image: tonistiigi/binfmt
+ # command: []
+ args: ["--install", "all"]
+ # Run the container with the privileged flag
+ # https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+ # https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#securitycontext-v1-core
+ securityContext:
+ privileged: true
+ containers:
+ - name: pause
+ image: gcr.io/google_containers/pause:3.2
+ resources:
+ limits:
+ cpu: 50m
+ memory: 50Mi
+ requests:
+ cpu: 50m
+ memory: 50Mi
diff --git a/kube/services/karpenter/karpenter-global-settings.yaml b/kube/services/karpenter/karpenter-global-settings.yaml
new file mode 100644
index 000000000..4c09a465d
--- /dev/null
+++ b/kube/services/karpenter/karpenter-global-settings.yaml
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: karpenter-global-settings
+ namespace: karpenter
+data:
+ aws.interruptionQueueName: SQS_NAME
\ No newline at end of file
diff --git a/kube/services/karpenter/nodeTemplateDefault.yaml b/kube/services/karpenter/nodeTemplateDefault.yaml
new file mode 100644
index 000000000..6ba8b3a0f
--- /dev/null
+++ b/kube/services/karpenter/nodeTemplateDefault.yaml
@@ -0,0 +1,66 @@
+apiVersion: karpenter.k8s.aws/v1alpha1
+kind: AWSNodeTemplate
+metadata:
+ name: default
+spec:
+ subnetSelector:
+ karpenter.sh/discovery: VPC_NAME
+ securityGroupSelector:
+ karpenter.sh/discovery: VPC_NAME
+ tags:
+ karpenter.sh/discovery: VPC_NAME
+ Environment: VPC_NAME
+ Name: eks-VPC_NAME-karpenter
+ purpose: default
+ metadataOptions:
+ httpEndpoint: enabled
+ httpProtocolIPv6: disabled
+ httpPutResponseHopLimit: 2
+ httpTokens: optional
+ userData: |
+ MIME-Version: 1.0
+ Content-Type: multipart/mixed; boundary="BOUNDARY"
+
+ --BOUNDARY
+ Content-Type: text/x-shellscript; charset="us-ascii"
+
+ #!/bin/bash -x
+ instanceId=$(curl -s http://169.254.169.254/latest/dynamic/instance-identity/document | jq -r .instanceId)
+ curl https://raw.githubusercontent.com/uc-cdis/cloud-automation/master/files/authorized_keys/ops_team >> /home/ec2-user/.ssh/authorized_keys
+
+ echo "$(jq '.registryPullQPS=0' /etc/kubernetes/kubelet/kubelet-config.json)" > /etc/kubernetes/kubelet/kubelet-config.json
+
+ sysctl -w fs.inotify.max_user_watches=12000
+
+ sudo yum update -y
+ sudo yum install -y dracut-fips openssl >> /opt/fips-install.log
+ sudo dracut -f
+ # configure grub
+ sudo /sbin/grubby --update-kernel=ALL --args="fips=1"
+
+ # --BOUNDARY
+ # Content-Type: text/cloud-config; charset="us-ascii"
+
+ # mounts:
+ # - ['fstype': 'bpf', 'mountpoint': '/sys/fs/bpf', 'opts': 'rw,relatime']
+
+ --BOUNDARY
+
+ Content-Type: text/cloud-config; charset="us-ascii"
+
+ power_state:
+ delay: now
+ mode: reboot
+ message: Powering off
+ timeout: 2
+ condition: true
+
+
+ --BOUNDARY--
+ blockDeviceMappings:
+ - deviceName: /dev/xvda
+ ebs:
+ volumeSize: 50Gi
+ volumeType: gp2
+ encrypted: true
+ deleteOnTermination: true
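The VPC_NAME tokens in this template are placeholders that the Karpenter setup is expected to substitute before applying; a hedged sketch of what that expansion looks like (the VPC name is illustrative, and --dry-run keeps it side-effect free):

    vpc_name="devplanetv1"   # illustrative
    sed "s/VPC_NAME/${vpc_name}/g" kube/services/karpenter/nodeTemplateDefault.yaml \
      | kubectl apply --dry-run=client -f -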
diff --git a/kube/services/karpenter/nodeTemplateGPU.yaml b/kube/services/karpenter/nodeTemplateGPU.yaml
new file mode 100644
index 000000000..925e7a9a0
--- /dev/null
+++ b/kube/services/karpenter/nodeTemplateGPU.yaml
@@ -0,0 +1,64 @@
+apiVersion: karpenter.k8s.aws/v1alpha1
+kind: AWSNodeTemplate
+metadata:
+ name: gpu
+spec:
+ subnetSelector:
+ karpenter.sh/discovery: VPC_NAME
+ securityGroupSelector:
+ karpenter.sh/discovery: VPC_NAME-gpu
+ tags:
+ Environment: VPC_NAME
+ Name: eks-VPC_NAME-gpu-karpenter
+ karpenter.sh/discovery: VPC_NAME
+ purpose: gpu
+ metadataOptions:
+ httpEndpoint: enabled
+ httpProtocolIPv6: disabled
+ httpPutResponseHopLimit: 2
+ httpTokens: optional
+ userData: |
+ MIME-Version: 1.0
+ Content-Type: multipart/mixed; boundary="BOUNDARY"
+
+ --BOUNDARY
+ Content-Type: text/x-shellscript; charset="us-ascii"
+
+ #!/bin/bash -x
+ instanceId=$(curl -s http://169.254.169.254/latest/dynamic/instance-identity/document | jq -r .instanceId)
+ curl https://raw.githubusercontent.com/uc-cdis/cloud-automation/master/files/authorized_keys/ops_team >> /home/ec2-user/.ssh/authorized_keys
+
+ echo "$(jq '.registryPullQPS=0' /etc/kubernetes/kubelet/kubelet-config.json)" > /etc/kubernetes/kubelet/kubelet-config.json
+
+ sysctl -w fs.inotify.max_user_watches=12000
+
+ sudo yum update -y
+ sudo yum install -y dracut-fips openssl >> /opt/fips-install.log
+ sudo dracut -f
+ # configure grub
+ sudo /sbin/grubby --update-kernel=ALL --args="fips=1"
+
+ # --BOUNDARY
+ # Content-Type: text/cloud-config; charset="us-ascii"
+
+ # mounts:
+ # - ['fstype': 'bpf', 'mountpoint': '/sys/fs/bpf', 'opts': 'rw,relatime']
+
+ --BOUNDARY
+ Content-Type: text/cloud-config; charset="us-ascii"
+
+ power_state:
+ delay: now
+ mode: reboot
+ message: Powering off
+ timeout: 2
+ condition: true
+
+ --BOUNDARY--
+ blockDeviceMappings:
+ - deviceName: /dev/xvda
+ ebs:
+ volumeSize: 200Gi
+ volumeType: gp2
+ encrypted: true
+ deleteOnTermination: true
diff --git a/kube/services/karpenter/nodeTemplateJupyter.yaml b/kube/services/karpenter/nodeTemplateJupyter.yaml
new file mode 100644
index 000000000..1c8970ad6
--- /dev/null
+++ b/kube/services/karpenter/nodeTemplateJupyter.yaml
@@ -0,0 +1,64 @@
+apiVersion: karpenter.k8s.aws/v1alpha1
+kind: AWSNodeTemplate
+metadata:
+ name: jupyter
+spec:
+ subnetSelector:
+ karpenter.sh/discovery: VPC_NAME
+ securityGroupSelector:
+ karpenter.sh/discovery: VPC_NAME-jupyter
+ tags:
+ Environment: VPC_NAME
+ Name: eks-VPC_NAME-jupyter-karpenter
+ karpenter.sh/discovery: VPC_NAME
+ purpose: jupyter
+ metadataOptions:
+ httpEndpoint: enabled
+ httpProtocolIPv6: disabled
+ httpPutResponseHopLimit: 2
+ httpTokens: optional
+ userData: |
+ MIME-Version: 1.0
+ Content-Type: multipart/mixed; boundary="BOUNDARY"
+
+ --BOUNDARY
+ Content-Type: text/x-shellscript; charset="us-ascii"
+
+ #!/bin/bash -x
+ instanceId=$(curl -s http://169.254.169.254/latest/dynamic/instance-identity/document | jq -r .instanceId)
+ curl https://raw.githubusercontent.com/uc-cdis/cloud-automation/master/files/authorized_keys/ops_team >> /home/ec2-user/.ssh/authorized_keys
+
+ echo "$(jq '.registryPullQPS=0' /etc/kubernetes/kubelet/kubelet-config.json)" > /etc/kubernetes/kubelet/kubelet-config.json
+
+ sysctl -w fs.inotify.max_user_watches=12000
+
+ sudo yum update -y
+ sudo yum install -y dracut-fips openssl >> /opt/fips-install.log
+ sudo dracut -f
+ # configure grub
+ sudo /sbin/grubby --update-kernel=ALL --args="fips=1"
+
+ # --BOUNDARY
+ # Content-Type: text/cloud-config; charset="us-ascii"
+
+ # mounts:
+ # - ['fstype': 'bpf', 'mountpoint': '/sys/fs/bpf', 'opts': 'rw,relatime']
+
+ --BOUNDARY
+ Content-Type: text/cloud-config; charset="us-ascii"
+
+ power_state:
+ delay: now
+ mode: reboot
+ message: Powering off
+ timeout: 2
+ condition: true
+
+ --BOUNDARY--
+ blockDeviceMappings:
+ - deviceName: /dev/xvda
+ ebs:
+ volumeSize: 50Gi
+ volumeType: gp2
+ encrypted: true
+ deleteOnTermination: true
diff --git a/kube/services/karpenter/nodeTemplateWorkflow.yaml b/kube/services/karpenter/nodeTemplateWorkflow.yaml
new file mode 100644
index 000000000..6e47b22f9
--- /dev/null
+++ b/kube/services/karpenter/nodeTemplateWorkflow.yaml
@@ -0,0 +1,64 @@
+apiVersion: karpenter.k8s.aws/v1alpha1
+kind: AWSNodeTemplate
+metadata:
+ name: workflow
+spec:
+ subnetSelector:
+ karpenter.sh/discovery: VPC_NAME
+ securityGroupSelector:
+ karpenter.sh/discovery: VPC_NAME-workflow
+ tags:
+ Environment: VPC_NAME
+ Name: eks-VPC_NAME-workflow-karpenter
+ karpenter.sh/discovery: VPC_NAME
+ purpose: workflow
+ metadataOptions:
+ httpEndpoint: enabled
+ httpProtocolIPv6: disabled
+ httpPutResponseHopLimit: 2
+ httpTokens: optional
+ userData: |
+ MIME-Version: 1.0
+ Content-Type: multipart/mixed; boundary="BOUNDARY"
+
+ --BOUNDARY
+ Content-Type: text/x-shellscript; charset="us-ascii"
+
+ #!/bin/bash -x
+ instanceId=$(curl -s http://169.254.169.254/latest/dynamic/instance-identity/document | jq -r .instanceId)
+ curl https://raw.githubusercontent.com/uc-cdis/cloud-automation/master/files/authorized_keys/ops_team >> /home/ec2-user/.ssh/authorized_keys
+
+ echo "$(jq '.registryPullQPS=0' /etc/kubernetes/kubelet/kubelet-config.json)" > /etc/kubernetes/kubelet/kubelet-config.json
+
+ sysctl -w fs.inotify.max_user_watches=12000
+
+ sudo yum update -y
+ sudo yum install -y dracut-fips openssl >> /opt/fips-install.log
+ sudo dracut -f
+ # configure grub
+ sudo /sbin/grubby --update-kernel=ALL --args="fips=1"
+
+ # --BOUNDARY
+ # Content-Type: text/cloud-config; charset="us-ascii"
+
+ # mounts:
+ # - ['fstype': 'bpf', 'mountpoint': '/sys/fs/bpf', 'opts': 'rw,relatime']
+
+ --BOUNDARY
+ Content-Type: text/cloud-config; charset="us-ascii"
+
+ power_state:
+ delay: now
+ mode: reboot
+ message: Powering off
+ timeout: 2
+ condition: true
+
+ --BOUNDARY--
+ blockDeviceMappings:
+ - deviceName: /dev/xvda
+ ebs:
+ volumeSize: 50Gi
+ volumeType: gp2
+ encrypted: true
+ deleteOnTermination: true
diff --git a/kube/services/karpenter/nvdp.yaml b/kube/services/karpenter/nvdp.yaml
new file mode 100644
index 000000000..4c37a9c27
--- /dev/null
+++ b/kube/services/karpenter/nvdp.yaml
@@ -0,0 +1,33 @@
+config:
+ # ConfigMap name if pulling from an external ConfigMap
+ name: ""
+ # Set of named configs to build an integrated ConfigMap from
+ map:
+ default: |-
+ version: v1
+ flags:
+ migStrategy: "none"
+ failOnInitError: true
+ nvidiaDriverRoot: "/"
+ plugin:
+ passDeviceSpecs: false
+ deviceListStrategy: envvar
+ deviceIDStrategy: uuid
+ shared_gpu: |-
+ version: v1
+ flags:
+ migStrategy: "none"
+ failOnInitError: true
+ nvidiaDriverRoot: "/"
+ plugin:
+ passDeviceSpecs: false
+ deviceListStrategy: envvar
+ deviceIDStrategy: uuid
+ sharing:
+ timeSlicing:
+ renameByDefault: false
+ resources:
+ - name: nvidia.com/gpu
+ replicas: 10
+nodeSelector:
+ jina.ai/gpu-type: nvidia
\ No newline at end of file
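These values enable a shared_gpu config with 10-way time-slicing alongside the default config. A hedged example of feeding them to the upstream NVIDIA device plugin chart (chart repo and release name are assumptions, not pinned by this repo):

    helm repo add nvdp https://nvidia.github.io/k8s-device-plugin
    helm repo update
    helm upgrade --install nvdp nvdp/nvidia-device-plugin \
      --namespace nvidia-device-plugin --create-namespace \
      -f kube/services/karpenter/nvdp.yaml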
diff --git a/kube/services/karpenter/provisionerArm.yaml b/kube/services/karpenter/provisionerArm.yaml
new file mode 100644
index 000000000..2f53581a2
--- /dev/null
+++ b/kube/services/karpenter/provisionerArm.yaml
@@ -0,0 +1,35 @@
+apiVersion: karpenter.sh/v1alpha5
+kind: Provisioner
+metadata:
+ name: default
+spec:
+ # Allow for spot and on demand instances
+ requirements:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values: ["on-demand", "spot"]
+ - key: kubernetes.io/arch
+ operator: In
+ values:
+ - arm64
+ - amd64
+ - key: karpenter.k8s.aws/instance-category
+ operator: In
+ values:
+ - c
+ - m
+ - r
+ - t
+ # Set a limit of 1000 vcpus
+ limits:
+ resources:
+ cpu: 1000
+ # Use the default node template
+ providerRef:
+ name: default
+ # Allow pods to be rearranged
+ consolidation:
+ enabled: true
+ # Kill nodes after 30 days to ensure they stay up to date
+ ttlSecondsUntilExpired: 2592000
+
diff --git a/kube/services/karpenter/provisionerDefault.yaml b/kube/services/karpenter/provisionerDefault.yaml
new file mode 100644
index 000000000..ac08284ce
--- /dev/null
+++ b/kube/services/karpenter/provisionerDefault.yaml
@@ -0,0 +1,35 @@
+apiVersion: karpenter.sh/v1alpha5
+kind: Provisioner
+metadata:
+ name: default
+spec:
+ # Allow for spot and on demand instances
+ requirements:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values: ["on-demand", "spot"]
+ - key: kubernetes.io/arch
+ operator: In
+ values:
+ - amd64
+ - key: karpenter.k8s.aws/instance-category
+ operator: In
+ values:
+ - c
+ - m
+ - r
+ - t
+ # Set a limit of 1000 vcpus
+ limits:
+ resources:
+ cpu: 1000
+ # Use the default node template
+ providerRef:
+ name: default
+ # Allow pods to be rearranged
+ consolidation:
+ enabled: true
+ # Kill nodes after 30 days to ensure they stay up to date
+ ttlSecondsUntilExpired: 2592000
+
+
diff --git a/kube/services/karpenter/provisionerGPU.yaml b/kube/services/karpenter/provisionerGPU.yaml
new file mode 100644
index 000000000..77a6b3876
--- /dev/null
+++ b/kube/services/karpenter/provisionerGPU.yaml
@@ -0,0 +1,29 @@
+apiVersion: karpenter.sh/v1alpha5
+kind: Provisioner
+metadata:
+ name: gpu
+spec:
+ ttlSecondsAfterEmpty: 300
+ labels:
+ jina.ai/node-type: gpu
+ jina.ai/gpu-type: nvidia
+ requirements:
+ - key: node.kubernetes.io/instance-type
+ operator: In
+ values: ["g4dn.xlarge", "g4dn.2xlarge", "g4dn.4xlarge", "g4dn.12xlarge"]
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values: ["spot", "on-demand"]
+ - key: kubernetes.io/arch
+ operator: In
+ values: ["amd64"]
+ taints:
+ - key: nvidia.com/gpu
+ effect: "NoSchedule"
+ limits:
+ resources:
+ cpu: 1000
+ # Use the default node template
+ providerRef:
+ name: gpu
+ ttlSecondsAfterEmpty: 30
diff --git a/kube/services/karpenter/provisionerGPUShared.yaml b/kube/services/karpenter/provisionerGPUShared.yaml
new file mode 100644
index 000000000..fa108c512
--- /dev/null
+++ b/kube/services/karpenter/provisionerGPUShared.yaml
@@ -0,0 +1,30 @@
+apiVersion: karpenter.sh/v1alpha5
+kind: Provisioner
+metadata:
+ name: gpu-shared
+spec:
+ ttlSecondsAfterEmpty: 300
+ labels:
+ jina.ai/node-type: gpu-shared
+ jina.ai/gpu-type: nvidia
+ nvidia.com/device-plugin.config: shared_gpu
+ requirements:
+ - key: karpenter.k8s.aws/instance-family
+ operator: In
+ values: ["g4dn", "g5","p4","p3"]
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values: ["spot", "on-demand"]
+ - key: kubernetes.io/arch
+ operator: In
+ values: ["amd64"]
+ taints:
+ - key: nvidia.com/gpu-shared
+ effect: "NoSchedule"
+ limits:
+ resources:
+ cpu: 1000
+ # Use the default node template
+ providerRef:
+ name: gpu
+ ttlSecondsAfterEmpty: 30
diff --git a/kube/services/karpenter/provisionerJupyter.yaml b/kube/services/karpenter/provisionerJupyter.yaml
new file mode 100644
index 000000000..0d4b1c85e
--- /dev/null
+++ b/kube/services/karpenter/provisionerJupyter.yaml
@@ -0,0 +1,40 @@
+apiVersion: karpenter.sh/v1alpha5
+kind: Provisioner
+metadata:
+ name: jupyter
+spec:
+ # Only allow on demand instance
+ requirements:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values: ["on-demand"]
+ - key: kubernetes.io/arch
+ operator: In
+ values:
+ - amd64
+ - key: karpenter.k8s.aws/instance-category
+ operator: In
+ values:
+ - c
+ - m
+ - r
+ - t
+ # Set a taint for jupyter pods
+ taints:
+ - key: role
+ value: jupyter
+ effect: NoSchedule
+ labels:
+ role: jupyter
+ # Set a limit of 1000 vcpus
+ limits:
+ resources:
+ cpu: 1000
+ # Use the jupyter node template
+ providerRef:
+ name: jupyter
+ # Allow pods to be rearranged
+ consolidation:
+ enabled: true
+ # Kill nodes after 30 days to ensure they stay up to date
+ ttlSecondsUntilExpired: 2592000
diff --git a/kube/services/karpenter/provisionerWorkflow.yaml b/kube/services/karpenter/provisionerWorkflow.yaml
new file mode 100644
index 000000000..f43dbf648
--- /dev/null
+++ b/kube/services/karpenter/provisionerWorkflow.yaml
@@ -0,0 +1,37 @@
+apiVersion: karpenter.sh/v1alpha5
+kind: Provisioner
+metadata:
+ name: workflow
+spec:
+ requirements:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values: ["on-demand"]
+ - key: kubernetes.io/arch
+ operator: In
+ values:
+ - amd64
+ - key: karpenter.k8s.aws/instance-category
+ operator: In
+ values:
+ - c
+ - m
+ - r
+ - t
+ taints:
+ - key: role
+ value: workflow
+ effect: NoSchedule
+ labels:
+ role: workflow
+ limits:
+ resources:
+ cpu: 1000
+ providerRef:
+ name: workflow
+ # Allow pods to be rearranged
+ consolidation:
+ enabled: true
+ # Kill nodes after 30 days to ensure they stay up to date
+ ttlSecondsUntilExpired: 2592000
+
diff --git a/kube/services/kayako-wrapper/kayako-wrapper-deploy.yaml b/kube/services/kayako-wrapper/kayako-wrapper-deploy.yaml
index 936f72520..9805a8e38 100644
--- a/kube/services/kayako-wrapper/kayako-wrapper-deploy.yaml
+++ b/kube/services/kayako-wrapper/kayako-wrapper-deploy.yaml
@@ -24,7 +24,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -33,6 +33,22 @@ spec:
values:
- kayako-wrapper
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
automountServiceAccountToken: false
volumes:
- name: ca-volume
diff --git a/kube/services/kubecost-standalone/kubecost-alb.yaml b/kube/services/kubecost/kubecost-alb.yaml
similarity index 100%
rename from kube/services/kubecost-standalone/kubecost-alb.yaml
rename to kube/services/kubecost/kubecost-alb.yaml
diff --git a/kube/services/kubecost-standalone/object-store.yaml b/kube/services/kubecost/object-store.yaml
similarity index 100%
rename from kube/services/kubecost-standalone/object-store.yaml
rename to kube/services/kubecost/object-store.yaml
diff --git a/kube/services/kubecost/values.yaml b/kube/services/kubecost/values.yaml
new file mode 100644
index 000000000..d1ac47246
--- /dev/null
+++ b/kube/services/kubecost/values.yaml
@@ -0,0 +1,183 @@
+#kubecostToken: KUBECOST_TOKEN
+
+global:
+ grafana:
+ enabled: false
+ proxy: false
+pricingCsv:
+ enabled: false
+ location:
+ provider: "AWS"
+ region: "us-east-1"
+ URI: s3://kc-csv-test/pricing_schema.csv # a valid file URI
+ csvAccessCredentials: pricing-schema-access-secret
+
+tolerations:
+- key: "role"
+ operator: "Equal"
+ value: "prometheus"
+ effect: "NoSchedule"
+
+nodeSelector: {}
+
+affinity: {}
+
+# If true, creates a PriorityClass to be used by the cost-analyzer pod
+priority:
+ enabled: false
+
+# If true, enable creation of NetworkPolicy resources.
+networkPolicy:
+ enabled: false
+
+podSecurityPolicy:
+ enabled: false
+
+# Enable this flag if you need to install with specific image tags
+# imageVersion: prod-1.97.0
+
+kubecostFrontend:
+ image: public.ecr.aws/kubecost/frontend
+ imagePullPolicy: Always
+ resources:
+ requests:
+ cpu: "10m"
+ memory: "55Mi"
+ #limits:
+ # cpu: "100m"
+ # memory: "256Mi"
+
+kubecostModel:
+ image: public.ecr.aws/kubecost/cost-model
+ imagePullPolicy: Always
+ warmCache: true
+ warmSavingsCache: true
+ etl: true
+ # The total number of days the ETL storage will build
+ etlStoreDurationDays: 120
+ maxQueryConcurrency: 5
+ # utcOffset represents a timezone in hours and minutes east (+) or west (-)
+ # of UTC, itself, which is defined as +00:00.
+ # See the tz database of timezones to look up your local UTC offset:
+ # https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
+ utcOffset: "+00:00"
+ resources:
+ requests:
+ cpu: "500m"
+ memory: "256Mi"
+ #limits:
+ # cpu: "800m"
+ # memory: "256Mi"
+
+# Define persistence volume for cost-analyzer
+persistentVolume:
+ size: 32Gi
+ dbSize: 32.0Gi
+ enabled: true # Note that setting this to false means configurations will be wiped out on pod restart.
+ # storageClass: "-" #
+ # existingClaim: kubecost-cost-analyzer # a claim in the same namespace as kubecost
+
+service:
+ type: ClusterIP
+ port: 9090
+ targetPort: 9090
+ # nodePort:
+ labels: {}
+ annotations: {}
+
+prometheus:
+ server:
+ # If clusterIDConfigmap is defined, instead use user-generated configmap with key CLUSTER_ID
+ # to use as unique cluster ID in kubecost cost-analyzer deployment.
+ # This overrides the cluster_id set in prometheus.server.global.external_labels.
+ # NOTE: This does not affect the external_labels set in prometheus config.
+ # clusterIDConfigmap: cluster-id-configmap
+ image:
+ repository: public.ecr.aws/kubecost/prometheus
+ tag: v2.35.0
+ resources:
+ requests:
+ memory: 3Gi
+ # requests:
+ # cpu: 500m
+ # memory: 30Gi
+ global:
+ scrape_interval: 1m
+ scrape_timeout: 10s
+ evaluation_interval: 1m
+ external_labels:
+ cluster_id: kubecost
+ persistentVolume:
+ size: 32Gi
+ enabled: true
+ extraArgs:
+ query.max-concurrency: 1
+ query.max-samples: 100000000
+ tolerations:
+ - key: "role"
+ operator: "Equal"
+ value: "prometheus"
+ effect: "NoSchedule"
+
+ configmapReload:
+ prometheus:
+ ## If false, the configmap-reload container will not be deployed
+ ##
+ enabled: false
+
+ ## configmap-reload container name
+ ##
+ name: configmap-reload
+ ## configmap-reload container image
+ ##
+ image:
+ repository: public.ecr.aws/bitnami/configmap-reload
+ tag: 0.7.1
+ pullPolicy: IfNotPresent
+ ## Additional configmap-reload container arguments
+ ##
+ extraArgs: {}
+ ## Additional configmap-reload volume directories
+ ##
+ extraVolumeDirs: []
+ ## Additional configmap-reload mounts
+ ##
+ extraConfigmapMounts: []
+ # - name: prometheus-alerts
+ # mountPath: /etc/alerts.d
+ # subPath: ""
+ # configMap: prometheus-alerts
+ # readOnly: true
+ ## configmap-reload resource requests and limits
+ ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/
+ ##
+ resources: {}
+
+ kube-state-metrics:
+ disabled: false
+ nodeExporter:
+ enabled: false
+
+reporting:
+ productAnalytics: false
+
+serviceAccount:
+ create: true # Set this to false if you're bringing your own service account.
+ annotations:
+ KUBECOST_SA
+
+kubecostProductConfigs:
+ athenaBucketName: s3://ATHENA_BUCKET
+ athenaRegion: AWS_REGION
+ athenaDatabase: ATHENA_DATABASE
+ athenaTable: ATHENA_TABLE
+ athenaProjectID: AWS_ACCOUNT_ID
+ clusterName: kubecost
+ #serviceKeySecretName: aws-service-key , might work with SA attached instead
+ projectID: AWS_ACCOUNT_ID
+ awsSpotDataRegion: AWS_REGION
+ awsSpotDataBucket: ATHENA_BUCKET
+ awsSpotDataPrefix: "spot-feed"
+
+networkCosts:
+ enabled: true
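A minimal sketch of how this values file could be applied, assuming the standard Kubecost cost-analyzer chart and a kubecost namespace (the chart name, repo URL, and namespace are assumptions, not taken from this patch):

    # Add the Kubecost chart repo and install the cost-analyzer with the values above
    helm repo add kubecost https://kubecost.github.io/cost-analyzer/
    helm upgrade --install kubecost kubecost/cost-analyzer \
      --namespace kubecost --create-namespace \
      -f kube/services/kubecost/values.yaml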
diff --git a/kube/services/manifestservice/manifestservice-deploy.yaml b/kube/services/manifestservice/manifestservice-deploy.yaml
index 52460cfbf..0966f2480 100644
--- a/kube/services/manifestservice/manifestservice-deploy.yaml
+++ b/kube/services/manifestservice/manifestservice-deploy.yaml
@@ -22,12 +22,14 @@ spec:
s3: "yes"
public: "yes"
userhelper: "yes"
+ netvpc: "yes"
GEN3_DATE_LABEL
spec:
+ serviceAccountName: manifestservice-sa
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -36,6 +38,22 @@ spec:
values:
- manifestservice
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - spot
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - SPOT
automountServiceAccountToken: false
volumes:
- name: config-volume
@@ -83,10 +101,9 @@ spec:
imagePullPolicy: Always
resources:
requests:
- cpu: 0.5
- memory: 512Mi
+ cpu: 100m
+ memory: 300Mi
limits:
- cpu: 1
memory: 1024Mi
livenessProbe:
httpGet:
diff --git a/kube/services/mariner/mariner-deploy.yaml b/kube/services/mariner/mariner-deploy.yaml
index 0912ea705..ec4b8a0d4 100644
--- a/kube/services/mariner/mariner-deploy.yaml
+++ b/kube/services/mariner/mariner-deploy.yaml
@@ -37,7 +37,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -46,6 +46,22 @@ spec:
values:
- mariner
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
automountServiceAccountToken: true
containers:
- name: mariner
diff --git a/kube/services/metadata/metadata-deploy.yaml b/kube/services/metadata/metadata-deploy.yaml
index c4842dadc..72986e795 100644
--- a/kube/services/metadata/metadata-deploy.yaml
+++ b/kube/services/metadata/metadata-deploy.yaml
@@ -29,7 +29,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -38,6 +38,22 @@ spec:
values:
- metadata
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - spot
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - SPOT
automountServiceAccountToken: false
volumes:
- name: config-volume-g3auto
@@ -75,6 +91,12 @@ spec:
name: manifest-metadata
key: AGG_MDS_NAMESPACE
optional: true
+ - name: AGG_MDS_DEFAULT_DATA_DICT_FIELD
+ valueFrom:
+ configMapKeyRef:
+ name: manifest-metadata
+ key: AGG_MDS_DEFAULT_DATA_DICT_FIELD
+ optional: true
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -104,11 +126,10 @@ spec:
subPath: json
resources:
requests:
- cpu: 0.4
- memory: 512Mi
+ cpu: 100m
+ memory: 128Mi
limits:
- cpu: 1
- memory: 2048Mi
+ memory: 512Mi
initContainers:
- name: metadata-db-migrate
GEN3_METADATA_IMAGE
diff --git a/kube/services/metrics-server/components.yaml b/kube/services/metrics-server/components.yaml
index 743d61965..a683ca0d6 100644
--- a/kube/services/metrics-server/components.yaml
+++ b/kube/services/metrics-server/components.yaml
@@ -1,22 +1,81 @@
-# Copied contents from here: https://github.com/kubernetes-sigs/metrics-server/releases/download/v0.3.7/components.yaml
+# Copied contents from here: https://github.com/kubernetes-sigs/metrics-server/releases/download/v0.6.2/components.yaml
# https://github.com/kubernetes-sigs/metrics-server/releases for more information on installation of a different version.
---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ labels:
+ k8s-app: metrics-server
+ name: metrics-server
+ namespace: kube-system
+---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
- name: system:aggregated-metrics-reader
labels:
- rbac.authorization.k8s.io/aggregate-to-view: "true"
- rbac.authorization.k8s.io/aggregate-to-edit: "true"
+ k8s-app: metrics-server
rbac.authorization.k8s.io/aggregate-to-admin: "true"
+ rbac.authorization.k8s.io/aggregate-to-edit: "true"
+ rbac.authorization.k8s.io/aggregate-to-view: "true"
+ name: system:aggregated-metrics-reader
rules:
-- apiGroups: ["metrics.k8s.io"]
- resources: ["pods", "nodes"]
- verbs: ["get", "list", "watch"]
+- apiGroups:
+ - metrics.k8s.io
+ resources:
+ - pods
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ k8s-app: metrics-server
+ name: system:metrics-server
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - nodes/metrics
+ - nodes/stats
+ verbs:
+ - get
+- apiGroups:
+ - ""
+ resources:
+ - pods
+ - nodes
+ - namespaces
+ - configmaps
+ verbs:
+ - get
+ - list
+ - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ labels:
+ k8s-app: metrics-server
+ name: metrics-server-auth-reader
+ namespace: kube-system
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: extension-apiserver-authentication-reader
+subjects:
+- kind: ServiceAccount
+ name: metrics-server
+ namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
+ labels:
+ k8s-app: metrics-server
name: metrics-server:system:auth-delegator
roleRef:
apiGroup: rbac.authorization.k8s.io
@@ -28,126 +87,116 @@ subjects:
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
+kind: ClusterRoleBinding
metadata:
- name: metrics-server-auth-reader
- namespace: kube-system
+ labels:
+ k8s-app: metrics-server
+ name: system:metrics-server
roleRef:
apiGroup: rbac.authorization.k8s.io
- kind: Role
- name: extension-apiserver-authentication-reader
+ kind: ClusterRole
+ name: system:metrics-server
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
-apiVersion: apiregistration.k8s.io/v1beta1
-kind: APIService
-metadata:
- name: v1beta1.metrics.k8s.io
-spec:
- service:
- name: metrics-server
- namespace: kube-system
- group: metrics.k8s.io
- version: v1beta1
- insecureSkipTLSVerify: true
- groupPriorityMinimum: 100
- versionPriority: 100
----
apiVersion: v1
-kind: ServiceAccount
+kind: Service
metadata:
+ labels:
+ k8s-app: metrics-server
name: metrics-server
namespace: kube-system
+spec:
+ ports:
+ - name: https
+ port: 443
+ protocol: TCP
+ targetPort: https
+ selector:
+ k8s-app: metrics-server
---
apiVersion: apps/v1
kind: Deployment
metadata:
- name: metrics-server
- namespace: kube-system
labels:
k8s-app: metrics-server
+ name: metrics-server
+ namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: metrics-server
+ strategy:
+ rollingUpdate:
+ maxUnavailable: 0
template:
metadata:
- name: metrics-server
labels:
k8s-app: metrics-server
spec:
- serviceAccountName: metrics-server
- volumes:
- # mount in tmp so we can safely use from-scratch images and/or read-only containers
- - name: tmp-dir
- emptyDir: {}
containers:
- - name: metrics-server
- image: k8s.gcr.io/metrics-server/metrics-server:v0.3.7
+ - args:
+ - --cert-dir=/tmp
+ - --secure-port=4443
+ - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
+ - --kubelet-use-node-status-port
+ - --metric-resolution=15s
+ image: k8s.gcr.io/metrics-server/metrics-server:v0.6.2
imagePullPolicy: IfNotPresent
- args:
- - --cert-dir=/tmp
- - --secure-port=4443
- - --v=2
+ livenessProbe:
+ failureThreshold: 3
+ httpGet:
+ path: /livez
+ port: https
+ scheme: HTTPS
+ periodSeconds: 10
+ name: metrics-server
ports:
- - name: main-port
- containerPort: 4443
+ - containerPort: 4443
+ name: https
protocol: TCP
+ readinessProbe:
+ failureThreshold: 3
+ httpGet:
+ path: /readyz
+ port: https
+ scheme: HTTPS
+ initialDelaySeconds: 20
+ periodSeconds: 10
+ resources:
+ requests:
+ cpu: 100m
+ memory: 200Mi
securityContext:
+ allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
volumeMounts:
- - name: tmp-dir
- mountPath: /tmp
+ - mountPath: /tmp
+ name: tmp-dir
nodeSelector:
kubernetes.io/os: linux
+ priorityClassName: system-cluster-critical
+ serviceAccountName: metrics-server
+ volumes:
+ - emptyDir: {}
+ name: tmp-dir
---
-apiVersion: v1
-kind: Service
+apiVersion: apiregistration.k8s.io/v1
+kind: APIService
metadata:
- name: metrics-server
- namespace: kube-system
labels:
- kubernetes.io/name: "Metrics-server"
- kubernetes.io/cluster-service: "true"
-spec:
- selector:
k8s-app: metrics-server
- ports:
- - port: 443
- protocol: TCP
- targetPort: main-port
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
- name: system:metrics-server
-rules:
-- apiGroups:
- - ""
- resources:
- - pods
- - nodes
- - nodes/stats
- - namespaces
- - configmaps
- verbs:
- - get
- - list
- - watch
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
- name: system:metrics-server
-roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: ClusterRole
- name: system:metrics-server
-subjects:
-- kind: ServiceAccount
- name: metrics-server
- namespace: kube-system
\ No newline at end of file
+ name: v1beta1.metrics.k8s.io
+spec:
+ group: metrics.k8s.io
+ groupPriorityMinimum: 100
+ insecureSkipTLSVerify: true
+ service:
+ name: metrics-server
+ namespace: kube-system
+ version: v1beta1
+ versionPriority: 100
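Once the v0.6.2 manifest above is applied, the aggregated metrics API can be verified with the standard checks below (resource names match this manifest):

    # The APIService should report Available=True once metrics-server is serving
    kubectl get apiservice v1beta1.metrics.k8s.io
    # Resource metrics should then be queryable
    kubectl top nodes
    kubectl top pods -n kube-system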
diff --git a/kube/services/monitoring/prometheus-application.yaml b/kube/services/monitoring/prometheus-application.yaml
new file mode 100644
index 000000000..75b085719
--- /dev/null
+++ b/kube/services/monitoring/prometheus-application.yaml
@@ -0,0 +1,24 @@
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+ name: prometheus-application
+ namespace: argocd
+spec:
+ project: default
+ source:
+ chart: kube-prometheus-stack
+ repoURL: https://prometheus-community.github.io/helm-charts
+ targetRevision: 43.1.3
+ helm:
+ valueFiles:
+ - https://raw.githubusercontent.com/uc-cdis/cloud-automation/master/kube/services/monitoring/values.yaml
+ releaseName: prometheus
+ destination:
+ server: 'https://kubernetes.default.svc'
+ namespace: monitoring
+ syncPolicy:
+ automated:
+ prune: true
+ selfHeal: true
+ syncOptions:
+ - CreateNamespace=true
\ No newline at end of file
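This Argo CD Application is registered by applying it into the argocd namespace; a sketch, assuming Argo CD is already installed in the cluster:

    kubectl apply -f kube/services/monitoring/prometheus-application.yaml
    # Watch Argo CD reconcile the kube-prometheus-stack release into the monitoring namespace
    kubectl -n argocd get application prometheus-application -w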
diff --git a/kube/services/monitoring/prometheus-values.yaml b/kube/services/monitoring/prometheus-values.yaml
index 9ae425abd..e49bfba09 100644
--- a/kube/services/monitoring/prometheus-values.yaml
+++ b/kube/services/monitoring/prometheus-values.yaml
@@ -1261,4 +1261,4 @@ extraScrapeConfigs:
networkPolicy:
## Enable creation of NetworkPolicy resources.
##
- enabled: false
+ enabled: false
\ No newline at end of file
diff --git a/kube/services/monitoring/thanos-deploy.yaml b/kube/services/monitoring/thanos-deploy.yaml
index 74c98dc19..f5c07a656 100644
--- a/kube/services/monitoring/thanos-deploy.yaml
+++ b/kube/services/monitoring/thanos-deploy.yaml
@@ -19,12 +19,14 @@ spec:
spec:
containers:
- name: thanos-query
- image: quay.io/thanos/thanos:v0.23.0
+ image: quay.io/thanos/thanos:v0.25.2
args:
- 'query'
- '--log.level=debug'
- '--query.replica-label=prometheus_replica'
- '--store=prometheus-kube-prometheus-thanos-discovery.monitoring.svc:10901'
+ - '--web.external-prefix=/thanos-query/'
+ - '--web.route-prefix=/thanos-query/'
resources:
requests:
cpu: '100m'
@@ -98,9 +100,10 @@ spec:
labels:
app: thanos-store
spec:
+ serviceAccount: thanos
containers:
- name: thanos-store
- image: quay.io/thanos/thanos:v0.23.0
+ image: quay.io/thanos/thanos:v0.25.2
args:
- 'store'
- '--log.level=debug'
@@ -162,15 +165,17 @@ spec:
labels:
app: thanos-compactor
spec:
+ serviceAccount: thanos
containers:
- name: thanos-compactor
- image: quay.io/thanos/thanos:v0.23.0
+ image: quay.io/thanos/thanos:v0.25.2
args:
- 'compact'
- '--log.level=debug'
- '--data-dir=/var/thanos/store'
- '--objstore.config-file=/config/thanos.yaml'
- '--wait'
+ - '--web.external-prefix=/thanos-compactor/'
ports:
- name: http
containerPort: 10902
diff --git a/kube/services/monitoring/values.yaml b/kube/services/monitoring/values.yaml
index 25208c9b6..d93e5098a 100644
--- a/kube/services/monitoring/values.yaml
+++ b/kube/services/monitoring/values.yaml
@@ -249,9 +249,7 @@ alertmanager:
# For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName
# See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
- # ingressClassName: nginx
-
- annotations: {}
+ #ingressClassName: nginx
labels: {}
@@ -261,8 +259,8 @@ alertmanager:
## Hosts must be provided if Ingress is enabled.
##
hosts: []
- #- prometheus.emalinowskiv1.planx-pla.net
- # - alertmanager.domain.com
+ #- prometheus.emalinowskiv1.planx-pla.net
+ # - alertmanager.domain.com
## Paths to use for ingress rules - one path should match the alertmanagerSpec.routePrefix
##
@@ -452,7 +450,7 @@ alertmanager:
## Image of Alertmanager
##
image:
- repository: quay.io/prometheus/alertmanager
+ repository: prometheus/alertmanager
tag: v0.24.0
sha: ""
@@ -562,7 +560,7 @@ alertmanager:
## The route prefix Alertmanager registers HTTP handlers for. This is useful, if using ExternalURL and a proxy is rewriting HTTP routes of a request, and the actual ExternalURL is still true,
## but the server serves requests under a different route prefix. For example for use with kubectl proxy.
##
- routePrefix: /
+ routePrefix: /alertmanager/
## If set to true all actions on the underlying managed objects are not going to be performed, except for delete actions.
##
@@ -731,18 +729,19 @@ grafana:
ingress:
## If true, Grafana Ingress will be created
##
- enabled: false
+ enabled: true
## IngressClassName for Grafana Ingress.
## Should be provided if Ingress is enable.
##
- # ingressClassName: nginx
+ ingressClassName: nginx
## Annotations for Grafana Ingress
##
- annotations: {}
- # kubernetes.io/ingress.class: nginx
- # kubernetes.io/tls-acme: "true"
+ annotations:
+ kubernetes.io/ingress.class: nginx
+ nginx.ingress.kubernetes.io/rewrite-target: /$1
+ nginx.ingress.kubernetes.io/use-regex: "true"
## Labels to be added to the Ingress
##
@@ -754,9 +753,14 @@ grafana:
# hosts:
# - grafana.domain.com
hosts: []
+ # - data.bloodpac.org
## Path for grafana ingress
- path: /
+ path: /grafana/?(.*)
+
+ grafana.ini:
+ server:
+ root_url: http://localhost:3000/grafana # the host can remain localhost; only the /grafana sub-path needs to match the ingress rewrite
## TLS configuration for grafana Ingress
## Secret must be manually created in the namespace
@@ -1536,6 +1540,15 @@ prometheus-node-exporter:
- --collector.filesystem.fs-types-exclude=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$
service:
portName: http-metrics
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: "eks.amazonaws.com/compute-type"
+ operator: NotIn
+ values:
+ - fargate
prometheus:
monitor:
enabled: true
@@ -1608,7 +1621,7 @@ prometheusOperator:
patch:
enabled: true
image:
- repository: k8s.gcr.io/ingress-nginx/kube-webhook-certgen
+ repository: ingress-nginx/kube-webhook-certgen
tag: v1.1.1
sha: ""
pullPolicy: IfNotPresent
@@ -1838,7 +1851,7 @@ prometheusOperator:
## Prometheus-operator image
##
image:
- repository: quay.io/prometheus-operator/prometheus-operator
+ repository: prometheus-operator/prometheus-operator
tag: v0.57.0
sha: ""
pullPolicy: IfNotPresent
@@ -1856,7 +1869,7 @@ prometheusOperator:
prometheusConfigReloader:
# image to use for config and rule reloading
image:
- repository: quay.io/prometheus-operator/prometheus-config-reloader
+ repository: prometheus-operator/prometheus-config-reloader
tag: v0.57.0
sha: ""
@@ -1872,7 +1885,7 @@ prometheusOperator:
## Thanos side-car image when configured
##
thanosImage:
- repository: quay.io/thanos/thanos
+ repository: thanos/thanos
tag: v0.25.2
sha: ""
@@ -2297,7 +2310,7 @@ prometheus:
## Image of Prometheus.
##
image:
- repository: quay.io/prometheus/prometheus
+ repository: prometheus/prometheus
tag: v2.36.1
sha: ""
@@ -2362,7 +2375,7 @@ prometheus:
## External URL at which Prometheus will be reachable.
##
- externalUrl: ""
+ externalUrl: "/prometheus/"
## Define which Nodes the Pods are scheduled on.
## ref: https://kubernetes.io/docs/user-guide/node-selection/
@@ -2591,7 +2604,7 @@ prometheus:
accessModes: ["ReadWriteOnce"]
resources:
requests:
- storage: 500Gi
+ storage: 80Gi
#selector: {}
## Using tmpfs volume
@@ -3293,7 +3306,7 @@ thanosRuler:
## Image of ThanosRuler
##
image:
- repository: quay.io/thanos/thanos
+ repository: thanos/thanos
tag: v0.24.0
sha: ""
@@ -3361,12 +3374,12 @@ thanosRuler:
## The external URL the Thanos Ruler instances will be available under. This is necessary to generate correct URLs. This is necessary if Thanos Ruler is not served from root of a DNS name. string false
##
- externalPrefix:
+ externalPrefix: /thanos
## The route prefix ThanosRuler registers HTTP handlers for. This is useful, if using ExternalURL and a proxy is rewriting HTTP routes of a request, and the actual ExternalURL is still true,
## but the server serves requests under a different route prefix. For example for use with kubectl proxy.
##
- routePrefix: /
+ routePrefix:
## ObjectStorageConfig configures object storage in Thanos. Alternative to
## ObjectStorageConfigFile, and lower order priority.
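The routePrefix and externalUrl changes above move Alertmanager, Prometheus, and the Thanos UIs onto sub-paths so revproxy can route them. A hedged smoke test for the explicit Alertmanager prefix (the service name follows the release naming already visible in the thanos-discovery store address above and is an assumption here):

    # With routePrefix: /alertmanager/, the readiness endpoint moves under that prefix
    kubectl -n monitoring port-forward svc/prometheus-kube-prometheus-alertmanager 9093 &
    curl -s http://localhost:9093/alertmanager/-/ready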
diff --git a/kube/services/netpolicy/gen3/services/argocd_netpolicy.yaml b/kube/services/netpolicy/gen3/services/argocd_netpolicy.yaml
new file mode 100644
index 000000000..ced3e4a20
--- /dev/null
+++ b/kube/services/netpolicy/gen3/services/argocd_netpolicy.yaml
@@ -0,0 +1,33 @@
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: netpolicy-argocd
+spec:
+ podSelector:
+ matchExpressions:
+ - key: app
+ operator: In
+ values:
+ - revproxy
+ - cohort-middleware
+ - wts
+ ingress:
+ - from:
+ - ipBlock:
+ cidr: 0.0.0.0/0
+ ports:
+ - port: 80
+ - port: 4000
+ - port: 8080
+ - port: 81
+ - port: 82
+ - port: 443
+ egress:
+ - to:
+ - namespaceSelector:
+ matchLabels:
+ app: argocd
+ policyTypes:
+ - Ingress
+ - Egress
diff --git a/kube/services/netpolicy/gen3/services/datadog_netpolicy.yaml b/kube/services/netpolicy/gen3/services/datadog_netpolicy.yaml
new file mode 100644
index 000000000..87b71392f
--- /dev/null
+++ b/kube/services/netpolicy/gen3/services/datadog_netpolicy.yaml
@@ -0,0 +1,25 @@
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: netpolicy-datadog
+spec:
+ podSelector:
+ matchExpressions:
+ - key: app
+ operator: In
+ values:
+ - cohort-middleware
+ ingress:
+ - from:
+ - ipBlock:
+ cidr: 0.0.0.0/0
+ ports:
+ - port: 8126
+ egress:
+ - to:
+ - namespaceSelector:
+ matchLabels:
+ app: datadog
+ policyTypes:
+ - Ingress
+ - Egress
\ No newline at end of file
diff --git a/kube/services/netpolicy/gen3/services/revproxy_netpolicy.yaml b/kube/services/netpolicy/gen3/services/revproxy_netpolicy.yaml
index 3cfe88d1f..2afecf049 100644
--- a/kube/services/netpolicy/gen3/services/revproxy_netpolicy.yaml
+++ b/kube/services/netpolicy/gen3/services/revproxy_netpolicy.yaml
@@ -29,6 +29,7 @@ spec:
- port: 82
- port: 443
- port: 8088
+ - port: 9090
egress:
- to:
- namespaceSelector:
@@ -42,6 +43,14 @@ spec:
- namespaceSelector:
matchLabels:
app: argo
+ - to:
+ - namespaceSelector:
+ matchLabels:
+ app: argocd
+ - to:
+ - namespaceSelector:
+ matchLabels:
+ name: kubecost
policyTypes:
- Ingress
- Egress
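The new egress rules match labels on the destination namespaces: app=argocd for the argocd namespace and name=kubecost for the kubecost namespace. Neither label is applied automatically, so if those namespaces do not already carry them the rules never match; a sketch of adding them by hand:

    # Label the target namespaces so the namespaceSelectors above can match
    kubectl label namespace argocd app=argocd --overwrite
    kubectl label namespace kubecost name=kubecost --overwrite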
diff --git a/kube/services/netpolicy/gen3/services/sower_netpolicy.yaml b/kube/services/netpolicy/gen3/services/sower_netpolicy.yaml
index 7ad51caca..93c2de3c3 100644
--- a/kube/services/netpolicy/gen3/services/sower_netpolicy.yaml
+++ b/kube/services/netpolicy/gen3/services/sower_netpolicy.yaml
@@ -3,7 +3,6 @@ kind: NetworkPolicy
metadata:
name: netpolicy-sowerjob
spec:
- spec:
podSelector:
matchLabels:
app: sowerjob
diff --git a/kube/services/netpolicy/gen3/services/ssjdispatcherjob_netpolicy.yaml b/kube/services/netpolicy/gen3/services/ssjdispatcherjob_netpolicy.yaml
index 7b1f85c29..bd6e03f05 100644
--- a/kube/services/netpolicy/gen3/services/ssjdispatcherjob_netpolicy.yaml
+++ b/kube/services/netpolicy/gen3/services/ssjdispatcherjob_netpolicy.yaml
@@ -3,7 +3,6 @@ kind: NetworkPolicy
metadata:
name: netpolicy-ssjdispatcherjob
spec:
- spec:
podSelector:
matchLabels:
app: ssjdispatcherjob
diff --git a/kube/services/node-monitors/application.yaml b/kube/services/node-monitors/application.yaml
new file mode 100644
index 000000000..0748f7c35
--- /dev/null
+++ b/kube/services/node-monitors/application.yaml
@@ -0,0 +1,22 @@
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+ name: node-monitor-application
+ namespace: argocd
+spec:
+ destination:
+ namespace: default
+ server: https://kubernetes.default.svc
+ project: default
+ source:
+ repoURL: https://github.com/uc-cdis/cloud-automation.git
+ targetRevision: master
+ path: kube/services/node-monitors/
+ directory:
+ exclude: "application.yaml"
+ syncPolicy:
+ automated:
+ prune: true
+ selfHeal: true
+ syncOptions:
+ - CreateNamespace=true
diff --git a/kube/services/node-monitors/argo-monitors/application.yaml b/kube/services/node-monitors/argo-monitors/application.yaml
new file mode 100644
index 000000000..fca4ace86
--- /dev/null
+++ b/kube/services/node-monitors/argo-monitors/application.yaml
@@ -0,0 +1,22 @@
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+ name: node-monitor-argo-application
+ namespace: argocd
+spec:
+ destination:
+ namespace: default
+ server: https://kubernetes.default.svc
+ project: default
+ source:
+ repoURL: https://github.com/uc-cdis/cloud-automation.git
+ targetRevision: master
+ path: kube/services/node-monitors/argo-monitors/
+ directory:
+ exclude: "application.yaml"
+ syncPolicy:
+ automated:
+ prune: true
+ selfHeal: true
+ syncOptions:
+ - CreateNamespace=true
diff --git a/kube/services/node-monitors/argo-monitors/argo-node-age.yaml b/kube/services/node-monitors/argo-monitors/argo-node-age.yaml
new file mode 100644
index 000000000..890495ee0
--- /dev/null
+++ b/kube/services/node-monitors/argo-monitors/argo-node-age.yaml
@@ -0,0 +1,58 @@
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+ name: argo-node-age
+ namespace: default
+spec:
+ schedule: "*/5 * * * *"
+ jobTemplate:
+ spec:
+ template:
+ metadata:
+ labels:
+ app: gen3job
+ spec:
+ serviceAccountName: node-monitor
+ containers:
+ - name: kubectl
+ image: quay.io/cdis/awshelper
+ env:
+ # This is the label we want to monitor; it probably will never need to change
+ - name: NODE_LABEL
+ value: purpose=workflow
+ # This is 3 * 3600, or 3 hours
+ - name: THRESHOLD_TIME
+ value: "10800"
+ - name: SLACK_WEBHOOK_URL
+ valueFrom:
+ configMapKeyRef:
+ name: global
+ key: slack_webhook
+
+ command: ["/bin/bash"]
+ args:
+ - "-c"
+ - |
+ #!/bin/bash
+ # Get all nodes with specific label and check their age
+ kubectl get nodes -l "$NODE_LABEL" -o json | jq -c '.items[] | {name: .metadata.name, creationTimestamp: .metadata.creationTimestamp}' | while read node_info; do
+ NODE_NAME=$(echo $node_info | jq -r '.name')
+ CREATION_TIMESTAMP=$(echo $node_info | jq -r '.creationTimestamp')
+
+ # Convert creation timestamp to Unix Epoch time
+ CREATION_EPOCH=$(date -d "$CREATION_TIMESTAMP" +%s)
+
+ # Get current Unix Epoch time
+ CURRENT_EPOCH=$(date +%s)
+
+ # Calculate node age in seconds
+ NODE_AGE=$(($CURRENT_EPOCH - $CREATION_EPOCH))
+
+ # Check if node age is greater than threshold
+ if [ "$NODE_AGE" -gt "$THRESHOLD_TIME" ]; then
+ echo "Node $NODE_NAME has been around too long, sending an alert"
+ # Send alert to Slack
+ curl -X POST -H 'Content-type: application/json' --data "{\"text\":\"WARNING: Node \`${NODE_NAME}\` is older than 3 hours!\"}" $SLACK_WEBHOOK_URL
+ fi
+ done
+ restartPolicy: OnFailure
\ No newline at end of file
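To exercise this monitor without waiting for the 5-minute schedule, a one-off Job can be created from the CronJob (a sketch; the namespace and CronJob name match the manifest above):

    # Fire the node-age check once and follow its output
    kubectl -n default create job --from=cronjob/argo-node-age argo-node-age-manual
    kubectl -n default logs -f job/argo-node-age-manual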
diff --git a/kube/services/node-monitors/auth.yaml b/kube/services/node-monitors/auth.yaml
new file mode 100644
index 000000000..72560cddc
--- /dev/null
+++ b/kube/services/node-monitors/auth.yaml
@@ -0,0 +1,18 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: node-monitor
+ namespace: default
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: node-monitor-binding
+subjects:
+ - kind: ServiceAccount
+ name: node-monitor
+ namespace: default
+roleRef:
+ kind: ClusterRole
+ name: system:node
+ apiGroup: rbac.authorization.k8s.io
diff --git a/kube/services/node-monitors/fenceshib-jenkins-test.yaml b/kube/services/node-monitors/fenceshib-jenkins-test.yaml
new file mode 100644
index 000000000..e9e27af98
--- /dev/null
+++ b/kube/services/node-monitors/fenceshib-jenkins-test.yaml
@@ -0,0 +1,40 @@
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+ name: fenceshib-service-check
+ namespace: default
+spec:
+ schedule: "0 */4 * * *"
+ jobTemplate:
+ spec:
+ template:
+ metadata:
+ labels:
+ app: gen3job
+ spec:
+ serviceAccountName: node-monitor
+ containers:
+ - name: kubectl
+ image: quay.io/cdis/awshelper
+ env:
+ - name: SLACK_WEBHOOK_URL
+ valueFrom:
+ configMapKeyRef:
+ name: global
+ key: slack_webhook
+ command: ["/bin/bash"]
+ args:
+ - "-c"
+ - |
+ #!/bin/bash
+
+ fenceshib=$(kubectl get services -A | grep "fenceshib-service" | awk '{print $2}')
+
+ # Check if there are any fenceshib services
+ if [[ ! -z "$fenceshib" ]]; then
+ echo "Alert: Service fenceshib-service found with output: $fenceshib"
+ curl -X POST -H 'Content-type: application/json' --data "{\"text\": \"WARNING: Fenceshib service discovered in qaplanetv1 cluster. This could cause issues with future CI runs. Please delete this service if it is not needed. Run the following in qaplanetv1 to see which namespace it is in: \`kubectl get services -A | grep "fenceshib-service"\`\"}" $SLACK_WEBHOOK_URL
+ else
+ echo "Fenceshib Service Not Found"
+ fi
+ restartPolicy: OnFailure
diff --git a/kube/services/node-monitors/node-not-ready.yaml b/kube/services/node-monitors/node-not-ready.yaml
new file mode 100644
index 000000000..500832fc3
--- /dev/null
+++ b/kube/services/node-monitors/node-not-ready.yaml
@@ -0,0 +1,48 @@
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+ name: node-not-ready-cron
+ namespace: default
+spec:
+ schedule: "*/30 * * * *"
+ jobTemplate:
+ spec:
+ template:
+ metadata:
+ labels:
+ app: gen3job
+ spec:
+ serviceAccountName: node-monitor
+ containers:
+ - name: kubectl
+ image: quay.io/cdis/awshelper
+ env:
+ - name: SLACK_WEBHOOK_URL
+ valueFrom:
+ configMapKeyRef:
+ name: global
+ key: slack_webhook
+ - name: ENVIRONMENT
+ valueFrom:
+ configMapKeyRef:
+ name: global
+ key: environment
+
+ command: ["/bin/bash"]
+ args:
+ - "-c"
+ - |
+ #!/bin/bash
+
+ # Get nodes that show "NodeStatusNeverUpdated"
+ NODES=$(kubectl get nodes -o json | jq -r '.items[] | select(.status.conditions[] | select(.type == "Ready" and .status == "Unknown")) | .metadata.name')
+
+ if [ -n "$NODES" ]; then
+ echo "Nodes reporting 'NodeStatusNeverUpdated', sending an alert:"
+ echo "$NODES"
+ # Send alert to Slack
+ curl -X POST -H 'Content-type: application/json' --data "{\"text\":\"WARNING: Node \`${NODES}\` is stuck in 'NotReady' in \`${ENVIRONMENT}\`! \"}" $SLACK_WEBHOOK_URL
+ else
+ echo "No nodes reporting 'NodeStatusNeverUpdated'"
+ fi
+ restartPolicy: OnFailure
diff --git a/kube/services/ohdsi-atlas/ohdsi-atlas-deploy.yaml b/kube/services/ohdsi-atlas/ohdsi-atlas-deploy.yaml
index abb611e39..62265503e 100644
--- a/kube/services/ohdsi-atlas/ohdsi-atlas-deploy.yaml
+++ b/kube/services/ohdsi-atlas/ohdsi-atlas-deploy.yaml
@@ -23,7 +23,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -32,6 +32,22 @@ spec:
values:
- ohdsi-atlas
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
automountServiceAccountToken: false
volumes:
- name: ohdsi-atlas-config-local
@@ -56,13 +72,12 @@ spec:
volumeMounts:
- name: ohdsi-atlas-config-local
readOnly: true
- mountPath: /usr/share/nginx/html/atlas/js/config-local.js
+ mountPath: /etc/atlas/config-local.js
subPath: config-local.js
imagePullPolicy: Always
resources:
requests:
- cpu: 1
- memory: 1Gi
+ cpu: 100m
+ memory: 100Mi
limits:
- cpu: 1
- memory: 1Gi
+ memory: 500Mi
diff --git a/kube/services/ohdsi-webapi/ohdsi-webapi-config.yaml b/kube/services/ohdsi-webapi/ohdsi-webapi-config.yaml
index 5cd46edd9..8eb01ec08 100644
--- a/kube/services/ohdsi-webapi/ohdsi-webapi-config.yaml
+++ b/kube/services/ohdsi-webapi/ohdsi-webapi-config.yaml
@@ -55,6 +55,9 @@ stringData:
security_oauth_callback_api: https://atlas.$hostname/WebAPI/user/oauth/callback
security_oauth_callback_urlResolver: query
+ security_ohdsi_custom_authorization_mode: teamproject
+ security_ohdsi_custom_authorization_url: $ARBORIST_URL/auth/mapping
+
logging_level_root: info
logging_level_org_ohdsi: info
logging_level_org_apache_shiro: info
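The teamproject authorization mode added above points OHDSI WebAPI at Arborist's auth mapping endpoint via $ARBORIST_URL. A hedged way to inspect what that endpoint returns for a user (the arborist-service name, port, and request body shape are assumptions based on typical Gen3 deployments, not taken from this patch):

    # Ask Arborist which resources and actions a given user is mapped to
    kubectl port-forward svc/arborist-service 8080:80 &
    curl -s -X POST http://localhost:8080/auth/mapping \
      -H 'Content-Type: application/json' \
      -d '{"username": "user@example.org"}'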
diff --git a/kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml b/kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml
index 2f4e57d47..a729ae7c4 100644
--- a/kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml
+++ b/kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml
@@ -26,7 +26,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -35,6 +35,22 @@ spec:
values:
- ohdsi-webapi
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
automountServiceAccountToken: false
volumes:
- name: ohdsi-webapi-reverse-proxy-config
@@ -62,13 +78,12 @@ spec:
imagePullPolicy: Always
resources:
requests:
- cpu: '0.5'
- memory: 1Gi
+ cpu: 100m
+ memory: 1500Mi
limits:
- cpu: '0.5'
- memory: 1Gi
+ memory: 4Gi
- name: ohdsi-webapi-reverse-proxy
- image: nginx:1.23
+ image: 707767160287.dkr.ecr.us-east-1.amazonaws.com/gen3/nginx:1.23
ports:
- containerPort: 80
volumeMounts:
@@ -79,8 +94,7 @@ spec:
imagePullPolicy: Always
resources:
requests:
- cpu: '0.5'
- memory: 1Gi
+ cpu: 100m
+ memory: 100Mi
limits:
- cpu: '0.5'
- memory: 1Gi
+ memory: 500Mi
diff --git a/kube/services/ohif-viewer/app-config.js b/kube/services/ohif-viewer/app-config.js
new file mode 100644
index 000000000..6768726f4
--- /dev/null
+++ b/kube/services/ohif-viewer/app-config.js
@@ -0,0 +1,209 @@
+window.config = {
+ routerBasename: '/ohif-viewer/',
+ // whiteLabeling: {},
+ extensions: [],
+ modes: [],
+ customizationService: {
+ // Shows a custom route - access via http://localhost:3000/custom
+ // helloPage: '@ohif/extension-default.customizationModule.helloPage',
+ },
+ showStudyList: true,
+ // some Windows systems have issues with more than 3 web workers
+ maxNumberOfWebWorkers: 3,
+ // below flag is for performance reasons, but it might not work for all servers
+ omitQuotationForMultipartRequest: true,
+ showWarningMessageForCrossOrigin: true,
+ showCPUFallbackMessage: true,
+ showLoadingIndicator: true,
+ strictZSpacingForVolumeViewport: true,
+ maxNumRequests: {
+ interaction: 100,
+ thumbnail: 75,
+ // Prefetch number is dependent on the http protocol. For http 2 or
+ // above, the number of requests can go a lot higher.
+ prefetch: 25,
+ },
+ // filterQueryParam: false,
+ defaultDataSourceName: 'dicomweb',
+ /* Dynamic config allows the user to pass a "configUrl" query string; this allows loading config without recompiling the application. The regex will ensure a valid configuration source */
+ // dangerouslyUseDynamicConfig: {
+ // enabled: true,
+ // // The regex ensures a valid configuration source; the default is /.*/ which matches any character. To use this, set up your own regex to choose a specific source of configuration only.
+ // // Example 1, to allow numbers and letters in an absolute or sub-path only.
+ // // regex: /(0-9A-Za-z.]+)(\/[0-9A-Za-z.]+)*/
+ // // Example 2, to restrict to either hospital.com or othersite.com.
+ // // regex: /(https:\/\/hospital.com(\/[0-9A-Za-z.]+)*)|(https:\/\/othersite.com(\/[0-9A-Za-z.]+)*)/
+ // regex: /.*/,
+ // },
+ dataSources: [
+ {
+ friendlyName: 'dcmjs DICOMWeb Server',
+ namespace: '@ohif/extension-default.dataSourcesModule.dicomweb',
+ sourceName: 'dicomweb',
+ configuration: {
+ name: 'dicomweb',
+ wadoUriRoot: '$DICOM_SERVER_URL/wado',
+ qidoRoot: '$DICOM_SERVER_URL/dicom-web',
+ wadoRoot: '$DICOM_SERVER_URL/dicom-web',
+
+ qidoSupportsIncludeField: false,
+ supportsReject: false,
+ imageRendering: 'wadors',
+ thumbnailRendering: 'wadors',
+ enableStudyLazyLoad: true,
+ supportsFuzzyMatching: false,
+ supportsWildcard: true,
+ staticWado: true,
+ singlepart: 'bulkdata,video',
+ // whether the data source should use retrieveBulkData to grab metadata,
+ // and, for a relative path, what it should be relative to; the options
+ // are the series level or the study level (some servers prefer series, some study)
+ bulkDataURI: {
+ enabled: true,
+ relativeResolution: 'studies',
+ },
+ },
+ },
+ {
+ friendlyName: 'dicomweb delegating proxy',
+ namespace: '@ohif/extension-default.dataSourcesModule.dicomwebproxy',
+ sourceName: 'dicomwebproxy',
+ configuration: {
+ name: 'dicomwebproxy',
+ },
+ },
+ {
+ friendlyName: 'dicom json',
+ namespace: '@ohif/extension-default.dataSourcesModule.dicomjson',
+ sourceName: 'dicomjson',
+ configuration: {
+ name: 'json',
+ },
+ },
+ {
+ friendlyName: 'dicom local',
+ namespace: '@ohif/extension-default.dataSourcesModule.dicomlocal',
+ sourceName: 'dicomlocal',
+ configuration: {},
+ },
+ ],
+ httpErrorHandler: error => {
+ // This is 429 when rejected from the public idc sandbox too often.
+ console.warn(error.status);
+
+ // Could use services manager here to bring up a dialog/modal if needed.
+ console.warn('test, navigate to https://ohif.org/');
+ },
+ // whiteLabeling: {
+ // /* Optional: Should return a React component to be rendered in the "Logo" section of the application's Top Navigation bar */
+ // createLogoComponentFn: function (React) {
+ // return React.createElement(
+ // 'a',
+ // {
+ // target: '_self',
+ // rel: 'noopener noreferrer',
+ // className: 'text-purple-600 line-through',
+ // href: '/',
+ // },
+ // React.createElement('img',
+ // {
+ // src: './assets/customLogo.svg',
+ // className: 'w-8 h-8',
+ // }
+ // ))
+ // },
+ // },
+ hotkeys: [
+ {
+ commandName: 'incrementActiveViewport',
+ label: 'Next Viewport',
+ keys: ['right'],
+ },
+ {
+ commandName: 'decrementActiveViewport',
+ label: 'Previous Viewport',
+ keys: ['left'],
+ },
+ { commandName: 'rotateViewportCW', label: 'Rotate Right', keys: ['r'] },
+ { commandName: 'rotateViewportCCW', label: 'Rotate Left', keys: ['l'] },
+ { commandName: 'invertViewport', label: 'Invert', keys: ['i'] },
+ {
+ commandName: 'flipViewportHorizontal',
+ label: 'Flip Horizontally',
+ keys: ['h'],
+ },
+ {
+ commandName: 'flipViewportVertical',
+ label: 'Flip Vertically',
+ keys: ['v'],
+ },
+ { commandName: 'scaleUpViewport', label: 'Zoom In', keys: ['+'] },
+ { commandName: 'scaleDownViewport', label: 'Zoom Out', keys: ['-'] },
+ { commandName: 'fitViewportToWindow', label: 'Zoom to Fit', keys: ['='] },
+ { commandName: 'resetViewport', label: 'Reset', keys: ['space'] },
+ { commandName: 'nextImage', label: 'Next Image', keys: ['down'] },
+ { commandName: 'previousImage', label: 'Previous Image', keys: ['up'] },
+ // {
+ // commandName: 'previousViewportDisplaySet',
+ // label: 'Previous Series',
+ // keys: ['pagedown'],
+ // },
+ // {
+ // commandName: 'nextViewportDisplaySet',
+ // label: 'Next Series',
+ // keys: ['pageup'],
+ // },
+ {
+ commandName: 'setToolActive',
+ commandOptions: { toolName: 'Zoom' },
+ label: 'Zoom',
+ keys: ['z'],
+ },
+ // ~ Window level presets
+ {
+ commandName: 'windowLevelPreset1',
+ label: 'W/L Preset 1',
+ keys: ['1'],
+ },
+ {
+ commandName: 'windowLevelPreset2',
+ label: 'W/L Preset 2',
+ keys: ['2'],
+ },
+ {
+ commandName: 'windowLevelPreset3',
+ label: 'W/L Preset 3',
+ keys: ['3'],
+ },
+ {
+ commandName: 'windowLevelPreset4',
+ label: 'W/L Preset 4',
+ keys: ['4'],
+ },
+ {
+ commandName: 'windowLevelPreset5',
+ label: 'W/L Preset 5',
+ keys: ['5'],
+ },
+ {
+ commandName: 'windowLevelPreset6',
+ label: 'W/L Preset 6',
+ keys: ['6'],
+ },
+ {
+ commandName: 'windowLevelPreset7',
+ label: 'W/L Preset 7',
+ keys: ['7'],
+ },
+ {
+ commandName: 'windowLevelPreset8',
+ label: 'W/L Preset 8',
+ keys: ['8'],
+ },
+ {
+ commandName: 'windowLevelPreset9',
+ label: 'W/L Preset 9',
+ keys: ['9'],
+ },
+ ],
+};
diff --git a/kube/services/ohif-viewer/ohif-viewer-deploy.yaml b/kube/services/ohif-viewer/ohif-viewer-deploy.yaml
new file mode 100644
index 000000000..e2df93cd0
--- /dev/null
+++ b/kube/services/ohif-viewer/ohif-viewer-deploy.yaml
@@ -0,0 +1,94 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: ohif-viewer-deployment
+ labels:
+ app: ohif-viewer
+spec:
+ selector:
+ matchLabels:
+ app: ohif-viewer
+ release: production
+ template:
+ metadata:
+ labels:
+ app: ohif-viewer
+ release: production
+ public: "yes"
+ GEN3_DATE_LABEL
+ spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - spot
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - SPOT
+ volumes:
+ - name: config-volume-g3auto
+ secret:
+ secretName: orthanc-s3-g3auto
+ containers:
+ - name: ohif-viewer
+ GEN3_OHIF-VIEWER_IMAGE
+ env:
+ - name: DD_ENABLED
+ valueFrom:
+ configMapKeyRef:
+ name: manifest-global
+ key: dd_enabled
+ optional: true
+ - name: DD_ENV
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.labels['tags.datadoghq.com/env']
+ - name: DD_SERVICE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.labels['tags.datadoghq.com/service']
+ - name: DD_VERSION
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.labels['tags.datadoghq.com/version']
+ - name: DD_LOGS_INJECTION
+ value: "true"
+ - name: DD_AGENT_HOST
+ valueFrom:
+ fieldRef:
+ fieldPath: status.hostIP
+ - name: PORT
+ value: "8080"
+ - name: PUBLIC_URL
+ value: "/ohif-viewer/"
+ imagePullPolicy: Always
+ readinessProbe:
+ httpGet:
+ path: /
+ port: 8080
+ initialDelaySeconds: 5
+ periodSeconds: 20
+ timeoutSeconds: 30
+ livenessProbe:
+ httpGet:
+ path: /
+ port: 8080
+ initialDelaySeconds: 5
+ periodSeconds: 60
+ timeoutSeconds: 30
+ ports:
+ - containerPort: 8080
+ volumeMounts:
+ - name: config-volume-g3auto
+ readOnly: true
+ mountPath: /usr/share/nginx/html/app-config.js
+ subPath: app-config.js
diff --git a/kube/services/ohif-viewer/ohif-viewer-service.yaml b/kube/services/ohif-viewer/ohif-viewer-service.yaml
new file mode 100644
index 000000000..3da2f8176
--- /dev/null
+++ b/kube/services/ohif-viewer/ohif-viewer-service.yaml
@@ -0,0 +1,16 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: ohif-viewer-service
+ labels:
+ app: ohif-viewer
+spec:
+ selector:
+ app: ohif-viewer
+ ports:
+ - protocol: TCP
+ port: 80
+ targetPort: 8080
+ nodePort: null
+ name: http
+ type: ClusterIP
diff --git a/kube/services/orthanc/orthanc-deploy.yaml b/kube/services/orthanc/orthanc-deploy.yaml
new file mode 100644
index 000000000..c04c74205
--- /dev/null
+++ b/kube/services/orthanc/orthanc-deploy.yaml
@@ -0,0 +1,103 @@
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+ name: orthanc-deployment
+ labels:
+ app: orthanc
+spec:
+ selector:
+ matchLabels:
+ app: orthanc
+ release: production
+ template:
+ metadata:
+ labels:
+ app: orthanc
+ release: production
+ public: "yes"
+ s3: "yes"
+ GEN3_DATE_LABEL
+ spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - spot
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - SPOT
+ volumes:
+ - name: config-volume-g3auto
+ secret:
+ secretName: orthanc-s3-g3auto
+ containers:
+ - name: orthanc
+ GEN3_ORTHANC_IMAGE
+ env:
+ - name: DD_ENABLED
+ valueFrom:
+ configMapKeyRef:
+ name: manifest-global
+ key: dd_enabled
+ optional: true
+ - name: DD_ENV
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.labels['tags.datadoghq.com/env']
+ - name: DD_SERVICE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.labels['tags.datadoghq.com/service']
+ - name: DD_VERSION
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.labels['tags.datadoghq.com/version']
+ - name: DD_LOGS_INJECTION
+ value: "true"
+ - name: DD_AGENT_HOST
+ valueFrom:
+ fieldRef:
+ fieldPath: status.hostIP
+ - name: DICOM_WEB_PLUGIN_ENABLED
+ value: "true"
+ - name: TCIA_PLUGIN_ENABLED
+ value: "true"
+ - name: ORTHANC_EXPLORER_2_ENABLED
+ value: "false"
+ imagePullPolicy: Always
+ readinessProbe:
+ httpGet:
+ path: /system
+ port: 8042
+ httpHeaders:
+ - name: Authorization
+ value: Basic cHVibGljOmhlbGxv
+ initialDelaySeconds: 5
+ periodSeconds: 20
+ timeoutSeconds: 30
+ livenessProbe:
+ httpGet:
+ path: /system
+ port: 8042
+ httpHeaders:
+ - name: Authorization
+ value: Basic cHVibGljOmhlbGxv
+ initialDelaySeconds: 5
+ periodSeconds: 60
+ timeoutSeconds: 30
+ ports:
+ - containerPort: 8042
+ volumeMounts:
+ - name: config-volume-g3auto
+ readOnly: true
+ mountPath: /etc/orthanc/orthanc_config_overwrites.json
+ subPath: orthanc_config_overwrites.json
diff --git a/kube/services/orthanc/orthanc-service.yaml b/kube/services/orthanc/orthanc-service.yaml
new file mode 100644
index 000000000..e8dadd1cc
--- /dev/null
+++ b/kube/services/orthanc/orthanc-service.yaml
@@ -0,0 +1,16 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: orthanc-service
+ labels:
+ app: orthanc
+spec:
+ selector:
+ app: orthanc
+ ports:
+ - protocol: TCP
+ port: 80
+ targetPort: 8042
+ nodePort: null
+ name: http
+ type: ClusterIP
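The readiness and liveness probes in the Orthanc deployment above hit the /system endpoint with a Basic auth header (cHVibGljOmhlbGxv is base64 for public:hello). The same check can be run by hand once the service is up (a sketch):

    # Port-forward the orthanc service locally and query the system endpoint
    kubectl port-forward svc/orthanc-service 8042:80 &
    curl -s -u public:hello http://localhost:8042/system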
diff --git a/kube/services/peregrine/peregrine-canary-deploy.yaml b/kube/services/peregrine/peregrine-canary-deploy.yaml
index d43698e67..4fffc1557 100644
--- a/kube/services/peregrine/peregrine-canary-deploy.yaml
+++ b/kube/services/peregrine/peregrine-canary-deploy.yaml
@@ -27,7 +27,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -36,6 +36,22 @@ spec:
values:
- peregrine
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - spot
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - SPOT
automountServiceAccountToken: false
volumes:
- name: shared-data
diff --git a/kube/services/peregrine/peregrine-deploy.yaml b/kube/services/peregrine/peregrine-deploy.yaml
index e69ef00c4..20bba64ad 100644
--- a/kube/services/peregrine/peregrine-deploy.yaml
+++ b/kube/services/peregrine/peregrine-deploy.yaml
@@ -33,7 +33,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -42,6 +42,22 @@ spec:
values:
- peregrine
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - spot
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - SPOT
automountServiceAccountToken: false
volumes:
- name: shared-data
@@ -167,10 +183,9 @@ spec:
imagePullPolicy: Always
resources:
requests:
- cpu: 1
+ cpu: 100m
memory: 1024Mi
limits:
- cpu: 2
memory: 2048Mi
livenessProbe:
httpGet:
diff --git a/kube/services/pidgin/pidgin-deploy.yaml b/kube/services/pidgin/pidgin-deploy.yaml
index 465b4b2f6..8448f66f9 100644
--- a/kube/services/pidgin/pidgin-deploy.yaml
+++ b/kube/services/pidgin/pidgin-deploy.yaml
@@ -27,7 +27,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -36,6 +36,22 @@ spec:
values:
- pidgin
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - spot
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - SPOT
automountServiceAccountToken: false
volumes:
- name: cert-volume
@@ -107,6 +123,8 @@ spec:
subPath: "ca.pem"
imagePullPolicy: Always
resources:
+ requests:
+ cpu: 100m
+ memory: 50Mi
limits:
- cpu: 0.8
memory: 512Mi
diff --git a/kube/services/pod-disruption-budget/ambassador.yaml b/kube/services/pod-disruption-budget/ambassador.yaml
new file mode 100644
index 000000000..72a02e175
--- /dev/null
+++ b/kube/services/pod-disruption-budget/ambassador.yaml
@@ -0,0 +1,9 @@
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+ name: ambassador-pdb
+spec:
+ minAvailable: 1
+ selector:
+ matchLabels:
+ app: ambassador
\ No newline at end of file
diff --git a/kube/services/pod-disruption-budget/arborist.yaml b/kube/services/pod-disruption-budget/arborist.yaml
new file mode 100644
index 000000000..3b736a8e0
--- /dev/null
+++ b/kube/services/pod-disruption-budget/arborist.yaml
@@ -0,0 +1,9 @@
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+ name: arborist-pdb
+spec:
+ minAvailable: 1
+ selector:
+ matchLabels:
+ app: arborist
\ No newline at end of file
diff --git a/kube/services/pod-disruption-budget/argo-wrapper.yaml b/kube/services/pod-disruption-budget/argo-wrapper.yaml
new file mode 100644
index 000000000..bae800886
--- /dev/null
+++ b/kube/services/pod-disruption-budget/argo-wrapper.yaml
@@ -0,0 +1,9 @@
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+ name: argo-wrapper-pdb
+spec:
+ minAvailable: 1
+ selector:
+ matchLabels:
+ app: argo-wrapper
\ No newline at end of file
diff --git a/kube/services/pod-disruption-budget/arranger.yaml b/kube/services/pod-disruption-budget/arranger.yaml
new file mode 100644
index 000000000..b98dc1ea5
--- /dev/null
+++ b/kube/services/pod-disruption-budget/arranger.yaml
@@ -0,0 +1,9 @@
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+ name: arranger-pdb
+spec:
+ minAvailable: 1
+ selector:
+ matchLabels:
+ app: arranger
\ No newline at end of file
diff --git a/kube/services/pod-disruption-budget/audit-service.yaml b/kube/services/pod-disruption-budget/audit-service.yaml
new file mode 100644
index 000000000..1ee8054c9
--- /dev/null
+++ b/kube/services/pod-disruption-budget/audit-service.yaml
@@ -0,0 +1,9 @@
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+ name: audit-service-pdb
+spec:
+ minAvailable: 1
+ selector:
+ matchLabels:
+ app: audit-service
\ No newline at end of file
diff --git a/kube/services/pod-disruption-budget/aws-es-proxy.yaml b/kube/services/pod-disruption-budget/aws-es-proxy.yaml
new file mode 100644
index 000000000..fc844a0f9
--- /dev/null
+++ b/kube/services/pod-disruption-budget/aws-es-proxy.yaml
@@ -0,0 +1,9 @@
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+ name: esproxy-pdb
+spec:
+ minAvailable: 1
+ selector:
+ matchLabels:
+ app: esproxy
\ No newline at end of file
diff --git a/kube/services/pod-disruption-budget/dicom-server.yaml b/kube/services/pod-disruption-budget/dicom-server.yaml
new file mode 100644
index 000000000..5755a0275
--- /dev/null
+++ b/kube/services/pod-disruption-budget/dicom-server.yaml
@@ -0,0 +1,9 @@
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+ name: dicom-server-pdb
+spec:
+ minAvailable: 1
+ selector:
+ matchLabels:
+ app: dicom-server
\ No newline at end of file
diff --git a/kube/services/pod-disruption-budget/dicom-viewer.yaml b/kube/services/pod-disruption-budget/dicom-viewer.yaml
new file mode 100644
index 000000000..8b2717a19
--- /dev/null
+++ b/kube/services/pod-disruption-budget/dicom-viewer.yaml
@@ -0,0 +1,9 @@
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+ name: dicom-viewer-pdb
+spec:
+ minAvailable: 1
+ selector:
+ matchLabels:
+ app: dicom-viewer
\ No newline at end of file
diff --git a/kube/services/pod-disruption-budget/fence.yaml b/kube/services/pod-disruption-budget/fence.yaml
new file mode 100644
index 000000000..62a26135f
--- /dev/null
+++ b/kube/services/pod-disruption-budget/fence.yaml
@@ -0,0 +1,9 @@
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+ name: fence-pdb
+spec:
+ minAvailable: 1
+ selector:
+ matchLabels:
+ app: fence
\ No newline at end of file
diff --git a/kube/services/pod-disruption-budget/guppy.yaml b/kube/services/pod-disruption-budget/guppy.yaml
new file mode 100644
index 000000000..7850a4a68
--- /dev/null
+++ b/kube/services/pod-disruption-budget/guppy.yaml
@@ -0,0 +1,9 @@
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+ name: guppy-pdb
+spec:
+ minAvailable: 1
+ selector:
+ matchLabels:
+ app: guppy
\ No newline at end of file
diff --git a/kube/services/pod-disruption-budget/hatchery.yaml b/kube/services/pod-disruption-budget/hatchery.yaml
new file mode 100644
index 000000000..9ddae7bdf
--- /dev/null
+++ b/kube/services/pod-disruption-budget/hatchery.yaml
@@ -0,0 +1,9 @@
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+ name: hatchery-pdb
+spec:
+ minAvailable: 1
+ selector:
+ matchLabels:
+ app: hatchery
\ No newline at end of file
diff --git a/kube/services/pod-disruption-budget/indexd.yaml b/kube/services/pod-disruption-budget/indexd.yaml
new file mode 100644
index 000000000..f0e33e774
--- /dev/null
+++ b/kube/services/pod-disruption-budget/indexd.yaml
@@ -0,0 +1,9 @@
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+ name: indexd-pdb
+spec:
+ minAvailable: 1
+ selector:
+ matchLabels:
+ app: indexd
\ No newline at end of file
diff --git a/kube/services/pod-disruption-budget/manifestservice.yaml b/kube/services/pod-disruption-budget/manifestservice.yaml
new file mode 100644
index 000000000..af0392992
--- /dev/null
+++ b/kube/services/pod-disruption-budget/manifestservice.yaml
@@ -0,0 +1,9 @@
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+ name: manifestservice-pdb
+spec:
+ minAvailable: 1
+ selector:
+ matchLabels:
+ app: manifestservice
\ No newline at end of file
diff --git a/kube/services/pod-disruption-budget/metadata.yaml b/kube/services/pod-disruption-budget/metadata.yaml
new file mode 100644
index 000000000..370977eb0
--- /dev/null
+++ b/kube/services/pod-disruption-budget/metadata.yaml
@@ -0,0 +1,9 @@
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+ name: metadata-pdb
+spec:
+ minAvailable: 1
+ selector:
+ matchLabels:
+ app: metadata
\ No newline at end of file
diff --git a/kube/services/pod-disruption-budget/peregrine.yaml b/kube/services/pod-disruption-budget/peregrine.yaml
new file mode 100644
index 000000000..82bee5ef3
--- /dev/null
+++ b/kube/services/pod-disruption-budget/peregrine.yaml
@@ -0,0 +1,9 @@
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+ name: peregrine-pdb
+spec:
+ minAvailable: 1
+ selector:
+ matchLabels:
+ app: peregrine
\ No newline at end of file
diff --git a/kube/services/pod-disruption-budget/pidgin.yaml b/kube/services/pod-disruption-budget/pidgin.yaml
new file mode 100644
index 000000000..975fc172f
--- /dev/null
+++ b/kube/services/pod-disruption-budget/pidgin.yaml
@@ -0,0 +1,9 @@
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+ name: pidgin-pdb
+spec:
+ minAvailable: 1
+ selector:
+ matchLabels:
+ app: pidgin
\ No newline at end of file
diff --git a/kube/services/pod-disruption-budget/portal.yaml b/kube/services/pod-disruption-budget/portal.yaml
new file mode 100644
index 000000000..04a91c123
--- /dev/null
+++ b/kube/services/pod-disruption-budget/portal.yaml
@@ -0,0 +1,9 @@
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+ name: portal-pdb
+spec:
+ minAvailable: 1
+ selector:
+ matchLabels:
+ app: portal
\ No newline at end of file
diff --git a/kube/services/pod-disruption-budget/requestor.yaml b/kube/services/pod-disruption-budget/requestor.yaml
new file mode 100644
index 000000000..c342de5ac
--- /dev/null
+++ b/kube/services/pod-disruption-budget/requestor.yaml
@@ -0,0 +1,9 @@
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+ name: requestor-pdb
+spec:
+ minAvailable: 1
+ selector:
+ matchLabels:
+ app: requestor
\ No newline at end of file
diff --git a/kube/services/pod-disruption-budget/revproxy.yaml b/kube/services/pod-disruption-budget/revproxy.yaml
new file mode 100644
index 000000000..7632f6375
--- /dev/null
+++ b/kube/services/pod-disruption-budget/revproxy.yaml
@@ -0,0 +1,9 @@
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+ name: revproxy-pdb
+spec:
+ minAvailable: 1
+ selector:
+ matchLabels:
+ app: revproxy
\ No newline at end of file
diff --git a/kube/services/pod-disruption-budget/sheepdog.yaml b/kube/services/pod-disruption-budget/sheepdog.yaml
new file mode 100644
index 000000000..7cf4d6c18
--- /dev/null
+++ b/kube/services/pod-disruption-budget/sheepdog.yaml
@@ -0,0 +1,9 @@
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+ name: sheepdog-pdb
+spec:
+ minAvailable: 1
+ selector:
+ matchLabels:
+ app: sheepdog
\ No newline at end of file
diff --git a/kube/services/pod-disruption-budget/ssjdispatcher.yaml b/kube/services/pod-disruption-budget/ssjdispatcher.yaml
new file mode 100644
index 000000000..9c95cfc27
--- /dev/null
+++ b/kube/services/pod-disruption-budget/ssjdispatcher.yaml
@@ -0,0 +1,9 @@
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+ name: ssjdispatcher-pdb
+spec:
+ minAvailable: 1
+ selector:
+ matchLabels:
+ app: ssjdispatcher
\ No newline at end of file
diff --git a/kube/services/pod-disruption-budget/wts.yaml b/kube/services/pod-disruption-budget/wts.yaml
new file mode 100644
index 000000000..47eb1b9bf
--- /dev/null
+++ b/kube/services/pod-disruption-budget/wts.yaml
@@ -0,0 +1,9 @@
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+ name: wts-pdb
+spec:
+ minAvailable: 1
+ selector:
+ matchLabels:
+ app: wts
\ No newline at end of file
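
The PodDisruptionBudget files above are identical except for the service name and selector label. A minimal sketch of the shared shape follows, with a hypothetical example-service standing in for the per-service name and label; with minAvailable: 1, the eviction API refuses voluntary disruptions (node drains, autoscaler or Karpenter consolidation) that would take down the last ready pod matching the selector.

apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: example-service-pdb      # hypothetical; the files above use <service>-pdb
spec:
  minAvailable: 1                # keep at least one pod up during voluntary disruptions
  selector:
    matchLabels:
      app: example-service       # matches the corresponding Deployment's pod label
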
diff --git a/kube/services/portal/portal-deploy.yaml b/kube/services/portal/portal-deploy.yaml
index 41a31b157..742f1b71c 100644
--- a/kube/services/portal/portal-deploy.yaml
+++ b/kube/services/portal/portal-deploy.yaml
@@ -23,7 +23,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -32,6 +32,22 @@ spec:
values:
- portal
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
automountServiceAccountToken: false
volumes:
- name: ca-volume
@@ -71,12 +87,8 @@ spec:
failureThreshold: 30
resources:
requests:
- cpu: 0.6
- memory: 512Mi
- limits:
- # portal pigs out on resources at startup, then settles down
cpu: 2
- memory: 4096Mi
+ memory: 3Gi
ports:
- containerPort: 80
- containerPort: 443
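
The same nodeAffinity block is added to most deployments in this change. It pairs two preferred terms that appear intended to cover the two ways nodes are provisioned: karpenter.sh/capacity-type is the label Karpenter places on nodes it creates, and eks.amazonaws.com/capacityType is the corresponding label on EKS managed node group nodes. Because both terms are preferredDuringSchedulingIgnoredDuringExecution, pods still schedule if neither label is present. A condensed sketch of the pattern as used here for the on-demand variant (the spot variant swaps in spot/SPOT):

affinity:
  nodeAffinity:
    preferredDuringSchedulingIgnoredDuringExecution:
    - weight: 100        # first preference: Karpenter-provisioned on-demand capacity
      preference:
        matchExpressions:
        - key: karpenter.sh/capacity-type
          operator: In
          values:
          - on-demand
    - weight: 99         # otherwise prefer EKS managed node group on-demand capacity
      preference:
        matchExpressions:
        - key: eks.amazonaws.com/capacityType
          operator: In
          values:
          - ONDEMAND
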
diff --git a/kube/services/portal/portal-root-deploy.yaml b/kube/services/portal/portal-root-deploy.yaml
index e65e12ea1..f639a1e15 100644
--- a/kube/services/portal/portal-root-deploy.yaml
+++ b/kube/services/portal/portal-root-deploy.yaml
@@ -23,7 +23,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -32,6 +32,22 @@ spec:
values:
- portal
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
automountServiceAccountToken: false
volumes:
- name: ca-volume
@@ -71,12 +87,8 @@ spec:
failureThreshold: 10
resources:
requests:
- cpu: 0.6
- memory: 512Mi
- limits:
- # portal pigs out on resources at startup, then settles down
cpu: 2
- memory: 4096Mi
+ memory: 3Gi
ports:
- containerPort: 80
- containerPort: 443
diff --git a/kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml b/kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml
index 13b27b878..45e6daaea 100644
--- a/kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml
+++ b/kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml
@@ -35,7 +35,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -44,6 +44,22 @@ spec:
values:
- presigned-url-fence
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - spot
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - SPOT
automountServiceAccountToken: false
volumes:
- name: yaml-merge
@@ -219,11 +235,10 @@ spec:
subPath: "jwt-keys.tar"
resources:
requests:
- cpu: 0.4
- memory: 1200Mi
+ cpu: 100m
+ memory: 600Mi
limits:
- cpu: 1.0
- memory: 2400Mi
+ memory: 1024Mi
command: ["/bin/bash"]
args:
- "-c"
diff --git a/kube/services/qa-dashboard/qa-dashboard-deployment.yaml b/kube/services/qa-dashboard/qa-dashboard-deployment.yaml
index b61b35058..c701cd90d 100644
--- a/kube/services/qa-dashboard/qa-dashboard-deployment.yaml
+++ b/kube/services/qa-dashboard/qa-dashboard-deployment.yaml
@@ -19,6 +19,23 @@ spec:
public: "yes"
netnolimit: "yes"
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - spot
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - SPOT
containers:
- name: qa-metrics
image: "quay.io/cdis/qa-metrics:latest"
diff --git a/kube/services/qabot/qabot-deploy.yaml b/kube/services/qabot/qabot-deploy.yaml
index d8423e5bc..c788180c3 100644
--- a/kube/services/qabot/qabot-deploy.yaml
+++ b/kube/services/qabot/qabot-deploy.yaml
@@ -19,6 +19,23 @@ spec:
app: qabot
netnolimit: "yes"
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
containers:
- name: qabot
image: "quay.io/cdis/qa-bot:latest"
diff --git a/kube/services/requestor/requestor-deploy.yaml b/kube/services/requestor/requestor-deploy.yaml
index 6cba99085..fb5ce173f 100644
--- a/kube/services/requestor/requestor-deploy.yaml
+++ b/kube/services/requestor/requestor-deploy.yaml
@@ -29,7 +29,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -38,6 +38,22 @@ spec:
values:
- requestor
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - spot
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - SPOT
automountServiceAccountToken: false
volumes:
- name: config-volume
@@ -76,11 +92,10 @@ spec:
subPath: "requestor-config.yaml"
resources:
requests:
- cpu: 0.4
- memory: 512Mi
+ cpu: 100m
+ memory: 100Mi
limits:
- cpu: 0.8
- memory: 1024Mi
+ memory: 300Mi
initContainers:
- name: requestor-db-migrate
GEN3_REQUESTOR_IMAGE
diff --git a/kube/services/revproxy/gen3.nginx.conf/argo-argo-workflows-server.conf b/kube/services/revproxy/gen3.nginx.conf/argo-server.conf
similarity index 86%
rename from kube/services/revproxy/gen3.nginx.conf/argo-argo-workflows-server.conf
rename to kube/services/revproxy/gen3.nginx.conf/argo-server.conf
index cb8def3aa..1cdd4608c 100644
--- a/kube/services/revproxy/gen3.nginx.conf/argo-argo-workflows-server.conf
+++ b/kube/services/revproxy/gen3.nginx.conf/argo-server.conf
@@ -7,7 +7,7 @@
auth_request /gen3-authz;
set $proxy_service "argo";
- set $upstream http://argo-argo-workflows-server.argo.svc.cluster.local:2746;
+ set $upstream SERVICE_URL;
rewrite ^/argo/(.*) /$1 break;
diff --git a/kube/services/revproxy/gen3.nginx.conf/argocd-server.conf b/kube/services/revproxy/gen3.nginx.conf/argocd-server.conf
new file mode 100644
index 000000000..bdd98712e
--- /dev/null
+++ b/kube/services/revproxy/gen3.nginx.conf/argocd-server.conf
@@ -0,0 +1,19 @@
+ location /argocd/ {
+ error_page 403 @errorworkspace;
+ set $authz_resource "/argocd";
+ set $authz_method "access";
+ set $authz_service "argocd";
+ # be careful - sub-request runs in same context as this request
+ auth_request /gen3-authz;
+
+ set $proxy_service "argocd";
+ set $upstream http://argocd-server.argocd.svc.cluster.local;
+
+ rewrite ^/argocd/(.*) /$1 break;
+
+ proxy_set_header Connection '';
+ proxy_http_version 1.1;
+ chunked_transfer_encoding off;
+
+ proxy_pass $upstream;
+ }
\ No newline at end of file
diff --git a/kube/services/revproxy/gen3.nginx.conf/gen3-discovery-ai-service.conf b/kube/services/revproxy/gen3.nginx.conf/gen3-discovery-ai-service.conf
new file mode 100644
index 000000000..42e9a3758
--- /dev/null
+++ b/kube/services/revproxy/gen3.nginx.conf/gen3-discovery-ai-service.conf
@@ -0,0 +1,12 @@
+ location /ai {
+ if ($csrf_check !~ ^ok-\S.+$) {
+ return 403 "failed csrf check";
+ }
+
+ set $proxy_service "gen3-discovery-ai-service";
+ set $upstream http://gen3-discovery-ai-service$des_domain;
+ rewrite ^/ai/(.*) /$1 break;
+ proxy_pass $upstream;
+ proxy_redirect http://$host/ https://$host/ai/;
+ client_max_body_size 0;
+ }
diff --git a/kube/services/revproxy/gen3.nginx.conf/gen3ff-as-root/frontend-framework-service.conf b/kube/services/revproxy/gen3.nginx.conf/gen3ff-as-root/frontend-framework-service.conf
index ac2cb75f6..37e7623de 100644
--- a/kube/services/revproxy/gen3.nginx.conf/gen3ff-as-root/frontend-framework-service.conf
+++ b/kube/services/revproxy/gen3.nginx.conf/gen3ff-as-root/frontend-framework-service.conf
@@ -2,6 +2,10 @@
if ($csrf_check !~ ^ok-\S.+$) {
return 403 "failed csrf check";
}
+
+ # added to avoid click-jacking attacks
+ add_header X-Frame-Options "SAMEORIGIN";
+
set $proxy_service "frontend-framework";
set $upstream http://frontend-framework-service.$namespace.svc.cluster.local;
proxy_pass $upstream;
diff --git a/kube/services/revproxy/gen3.nginx.conf/gen3ff-as-root/portal-service.conf b/kube/services/revproxy/gen3.nginx.conf/gen3ff-as-root/portal-service.conf
index 58f0851d6..75d69c185 100644
--- a/kube/services/revproxy/gen3.nginx.conf/gen3ff-as-root/portal-service.conf
+++ b/kube/services/revproxy/gen3.nginx.conf/gen3ff-as-root/portal-service.conf
@@ -21,5 +21,8 @@
rewrite ^/(.*)$ /dashboard/Public/maintenance-page/index.html redirect;
}
+ # added to avoid click-jacking attacks
+ add_header X-Frame-Options "SAMEORIGIN";
+
proxy_pass $upstream;
}
diff --git a/kube/services/revproxy/gen3.nginx.conf/grafana.conf b/kube/services/revproxy/gen3.nginx.conf/grafana.conf
index 93e50798d..a78b0684f 100644
--- a/kube/services/revproxy/gen3.nginx.conf/grafana.conf
+++ b/kube/services/revproxy/gen3.nginx.conf/grafana.conf
@@ -1,17 +1,17 @@
- location /grafana/ {
- error_page 403 @errorworkspace;
- set $authz_resource "/prometheus";
- set $authz_method "access";
- set $authz_service "prometheus";
- # be careful - sub-request runs in same context as this request
- auth_request /gen3-authz;
+ # location /grafana/ {
+ # error_page 403 @errorworkspace;
+ # set $authz_resource "/prometheus";
+ # set $authz_method "access";
+ # set $authz_service "prometheus";
+ # # be careful - sub-request runs in same context as this request
+ # auth_request /gen3-authz;
- proxy_set_header Host $host;
- proxy_set_header Authorization "Basic CREDS";
+ # proxy_set_header Host $host;
+ # proxy_set_header Authorization "Basic CREDS";
- set $proxy_service "grafana";
- set $upstream http://grafana.grafana.svc.cluster.local;
- rewrite ^/grafana/(.*) /$1 break;
- proxy_pass $upstream;
- #proxy_redirect http://$host/ https://$host/grafana/;
- }
+ # set $proxy_service "grafana";
+ # set $upstream http://grafana.grafana.svc.cluster.local;
+ # rewrite ^/grafana/(.*) /$1 break;
+ # proxy_pass $upstream;
+ # #proxy_redirect http://$host/ https://$host/grafana/;
+ # }
diff --git a/kube/services/revproxy/gen3.nginx.conf/guacamole-service.conf b/kube/services/revproxy/gen3.nginx.conf/guacamole-service.conf
new file mode 100644
index 000000000..398b2fa59
--- /dev/null
+++ b/kube/services/revproxy/gen3.nginx.conf/guacamole-service.conf
@@ -0,0 +1,20 @@
+#location /guacamole/ {
+location /guac/ {
+# if ($csrf_check !~ ^ok-\S.+$) {
+# return 403 "failed csrf check";
+# }
+
+ set $proxy_service "guacamole-service";
+# set $upstream http://guacamole-service.$namespace.svc.cluster.local/guacamole;
+# rewrite ^/guacamole/(.*) /$1 break;
+ set $upstream http://guacamole-service.$namespace.svc.cluster.local;
+ rewrite ^/guac/(.*) /$1 break;
+ proxy_pass $upstream;
+ proxy_redirect off;
+ proxy_buffering off;
+ proxy_http_version 1.1;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection $http_connection;
+ client_max_body_size 512m;
+}
diff --git a/kube/services/revproxy/gen3.nginx.conf/guppy-service.conf b/kube/services/revproxy/gen3.nginx.conf/guppy-service.conf
index db2de5886..e6d66ec12 100644
--- a/kube/services/revproxy/gen3.nginx.conf/guppy-service.conf
+++ b/kube/services/revproxy/gen3.nginx.conf/guppy-service.conf
@@ -1,4 +1,8 @@
location /guppy/ {
+ if ($csrf_check !~ ^ok-\S.+$) {
+ return 403 "failed csrf check, make sure data-portal version >= 2023.12 or >= 5.19.0";
+ }
+
proxy_connect_timeout 600s;
proxy_send_timeout 600s;
proxy_read_timeout 600s;
diff --git a/kube/services/revproxy/gen3.nginx.conf/kubecost-service.conf b/kube/services/revproxy/gen3.nginx.conf/kubecost-service.conf
index a620d836d..20d57d595 100644
--- a/kube/services/revproxy/gen3.nginx.conf/kubecost-service.conf
+++ b/kube/services/revproxy/gen3.nginx.conf/kubecost-service.conf
@@ -18,7 +18,7 @@
# if not using the jupyterhub service
# this isn't dev namespace friendly, must be manually updated
set $proxy_service "kubecost";
- set $upstream http://kubecost-cost-analyzer.kubecost.svc.cluster.local:443;
+ set $upstream http://kubecost-cost-analyzer.kubecost.svc.cluster.local:9090;
rewrite ^/kubecost/(.*) /$1 break;
proxy_pass $upstream;
proxy_set_header Authorization "$access_token";
diff --git a/kube/services/revproxy/gen3.nginx.conf/ohif-viewer-service.conf b/kube/services/revproxy/gen3.nginx.conf/ohif-viewer-service.conf
new file mode 100644
index 000000000..22926bcf0
--- /dev/null
+++ b/kube/services/revproxy/gen3.nginx.conf/ohif-viewer-service.conf
@@ -0,0 +1,17 @@
+location /ohif-viewer/ {
+ # if ($csrf_check !~ ^ok-\S.+$) {
+ # return 403 "failed csrf check";
+ # }
+
+ # see if this can be fixed in the future for anonymous access
+ # set $authz_resource "/services/ohif-viewer";
+ # set $authz_method "read";
+ # set $authz_service "ohif-viewer";
+
+ # auth_request /gen3-authz;
+
+ set $proxy_service "ohif-viewer";
+ set $upstream http://ohif-viewer-service.$namespace.svc.cluster.local;
+ rewrite ^/ohif-viewer/(.*) /$1 break;
+ proxy_pass $upstream;
+}
diff --git a/kube/services/revproxy/gen3.nginx.conf/orthanc-service.conf b/kube/services/revproxy/gen3.nginx.conf/orthanc-service.conf
new file mode 100644
index 000000000..ed736189c
--- /dev/null
+++ b/kube/services/revproxy/gen3.nginx.conf/orthanc-service.conf
@@ -0,0 +1,41 @@
+location /orthanc/ {
+ # if ($csrf_check !~ ^ok-\S.+$) {
+ # return 403 "failed csrf check";
+ # }
+
+ set $authz_resource "/services/orthanc";
+ set $authz_method "create";
+ set $authz_service "orthanc";
+
+ auth_request /gen3-authz;
+
+ proxy_set_header Authorization "Basic cHVibGljOmhlbGxv";
+
+ set $proxy_service "orthanc";
+ set $upstream http://orthanc-service.$namespace.svc.cluster.local;
+ rewrite ^/orthanc/(.*) /$1 break;
+ proxy_pass $upstream;
+
+ # no limit to payload size so we can upload large DICOM files
+ client_max_body_size 0;
+}
+
+location /orthanc/dicom-web/studies/ {
+ set $authz_method "read";
+ set $authz_resource "/services/orthanc/studies";
+ set $authz_service "orthanc";
+
+ auth_request /gen3-authz;
+ if ($request_method = POST) {
+ return 403;
+ }
+ proxy_set_header Authorization "Basic cHVibGljOmhlbGxv";
+
+ set $proxy_service "orthanc";
+ set $upstream http://orthanc-service.$namespace.svc.cluster.local;
+ rewrite ^/orthanc/(.*) /$1 break;
+ proxy_pass $upstream;
+
+ # no limit to payload size so we can upload large DICOM files
+ client_max_body_size 0;
+}
diff --git a/kube/services/revproxy/gen3.nginx.conf/portal-as-root/frontend-framework-service.conf b/kube/services/revproxy/gen3.nginx.conf/portal-as-root/frontend-framework-service.conf
index dbb24e4b2..f3686d1a6 100644
--- a/kube/services/revproxy/gen3.nginx.conf/portal-as-root/frontend-framework-service.conf
+++ b/kube/services/revproxy/gen3.nginx.conf/portal-as-root/frontend-framework-service.conf
@@ -6,6 +6,10 @@
if ($csrf_check !~ ^ok-\S.+$) {
return 403 "failed csrf check";
}
+
+ # added to avoid click-jacking attacks
+ add_header X-Frame-Options "SAMEORIGIN";
+
set $proxy_service "frontend-framework";
# frontend framework service expects the /ff/ prefix, so no path rewrite
set $upstream http://frontend-framework-service.$namespace.svc.cluster.local;
diff --git a/kube/services/revproxy/gen3.nginx.conf/portal-as-root/portal-service.conf b/kube/services/revproxy/gen3.nginx.conf/portal-as-root/portal-service.conf
index e195d2fb2..9d38a2a99 100644
--- a/kube/services/revproxy/gen3.nginx.conf/portal-as-root/portal-service.conf
+++ b/kube/services/revproxy/gen3.nginx.conf/portal-as-root/portal-service.conf
@@ -21,5 +21,8 @@
rewrite ^/(.*)$ /dashboard/Public/maintenance-page/index.html redirect;
}
+ # added to avoid click-jacking attacks
+ add_header X-Frame-Options "SAMEORIGIN";
+
proxy_pass $upstream;
}
diff --git a/kube/services/revproxy/gen3.nginx.conf/prometheus-server.conf b/kube/services/revproxy/gen3.nginx.conf/prometheus-server.conf
index a9197eec6..c936f541e 100644
--- a/kube/services/revproxy/gen3.nginx.conf/prometheus-server.conf
+++ b/kube/services/revproxy/gen3.nginx.conf/prometheus-server.conf
@@ -7,8 +7,69 @@
auth_request /gen3-authz;
set $proxy_service "prometheus";
- set $upstream http://prometheus-server.prometheus.svc.cluster.local;
- #rewrite ^/prometheus/(.*) /$1 break;
+ set $upstream http://prometheus-kube-prometheus-prometheus.monitoring.svc.cluster.local:9090;
+
+ rewrite ^/prometheus/(.*) /$1 break;
+
proxy_pass $upstream;
- #proxy_redirect http://$host/ https://$host/prometheus/;
}
+ location /grafana/ {
+ error_page 403 @errorworkspace;
+ set $authz_resource "/prometheus";
+ set $authz_method "access";
+ set $authz_service "prometheus";
+ # be careful - sub-request runs in same context as this request
+ auth_request /gen3-authz;
+
+ proxy_set_header Host $http_host;
+
+ set $proxy_service "grafana";
+ set $upstream http://prometheus-grafana.monitoring.svc.cluster.local;
+
+ rewrite ^/grafana/(.*) /$1 break;
+
+ proxy_pass $upstream;
+ }
+ location /alertmanager/ {
+ error_page 403 @errorworkspace;
+ set $authz_resource "/prometheus";
+ set $authz_method "access";
+ set $authz_service "prometheus";
+ # be careful - sub-request runs in same context as this request
+ auth_request /gen3-authz;
+
+ set $proxy_service "alertmanager";
+ set $upstream http://alertmanager-operated.monitoring.svc.cluster.local:9093;
+
+ #rewrite ^/alertmanager/(.*) /$1 break;
+
+ proxy_pass $upstream;
+ }
+ location /thanos-query/ {
+ error_page 403 @errorworkspace;
+ set $authz_resource "/prometheus";
+ set $authz_method "access";
+ set $authz_service "prometheus";
+ # be careful - sub-request runs in same context as this request
+ auth_request /gen3-authz;
+
+ set $proxy_service "thanos-query";
+ set $upstream http://thanos-query.monitoring.svc.cluster.local:9090;
+
+ proxy_pass $upstream;
+ }
+ location /thanos-compactor/ {
+ error_page 403 @errorworkspace;
+ set $authz_resource "/prometheus";
+ set $authz_method "access";
+ set $authz_service "prometheus";
+ # be careful - sub-request runs in same context as this request
+ auth_request /gen3-authz;
+
+ set $proxy_service "thanos-compactor";
+ set $upstream http://thanos-compactor.monitoring.svc.cluster.local:10902;
+
+ rewrite ^/thanos-compactor/(.*) /$1 break;
+
+ proxy_pass $upstream;
+ }
\ No newline at end of file
diff --git a/kube/services/revproxy/nginx.conf b/kube/services/revproxy/nginx.conf
index 79c5d2e22..d0e14f49b 100644
--- a/kube/services/revproxy/nginx.conf
+++ b/kube/services/revproxy/nginx.conf
@@ -44,6 +44,9 @@ types_hash_max_size 2048;
port_in_redirect off;
# server_tokens off;
+# increase max from default 1m
+client_max_body_size 200m;
+
# For websockets
map $http_upgrade $connection_upgrade {
default upgrade;
@@ -156,7 +159,7 @@ perl_set $namespace 'sub { return $ENV{"POD_NAMESPACE"}; }';
##
# For using fence, indexd, etc from a different namespace within the same k8 cluster -
# support data ecosystem feature ...
-##
+##
perl_set $des_domain 'sub { return $ENV{"DES_NAMESPACE"} ? qq{.$ENV{"DES_NAMESPACE"}.svc.cluster.local} : qq{.$ENV{"POD_NAMESPACE"}.svc.cluster.local}; }';
##
@@ -227,13 +230,13 @@ server {
# check request against ip black list
include /etc/nginx/manifest-revproxy/blacklist.conf;
-
+
#
# From https://enable-cors.org/server_nginx.html
# This overrides the individual services
#
set $allow_origin "*";
- if ($http_origin) {
+ if ($http_origin = "https://$host") {
set $allow_origin "$http_origin";
}
@@ -245,7 +248,7 @@ server {
# update service release cookie
add_header Set-Cookie "service_releases=${service_releases};Path=/;Max-Age=600;HttpOnly;Secure;SameSite=Lax";
-
+
if ($request_method = 'OPTIONS') {
return 204;
}
@@ -297,8 +300,8 @@ server {
}
#
- # initialize proxy_service and upstream used as key in logs to
- # unspecified values -
+ # initialize proxy_service and upstream used as key in logs to
+ # unspecified values -
# individual service locations should override to "peregrine", ...
#
set $proxy_service "noproxy";
@@ -328,7 +331,7 @@ server {
proxy_busy_buffers_size 32k;
client_body_buffer_size 16k;
proxy_read_timeout 300;
-
+
#
# also incoming from client:
# * https://fullvalence.com/2016/07/05/cookie-size-in-nginx/
@@ -390,7 +393,7 @@ server {
return 500 "{ \"error\": \"service failure - try again later\"}";
}
- location = /_status {
+ location = /_status {
default_type application/json;
set $upstream http://localhost;
return 200 "{ \"message\": \"Feelin good!\", \"csrf\": \"$csrf_token\" }\n";
diff --git a/kube/services/revproxy/revproxy-deploy.yaml b/kube/services/revproxy/revproxy-deploy.yaml
index 5f0f90f3a..7ea798b77 100644
--- a/kube/services/revproxy/revproxy-deploy.yaml
+++ b/kube/services/revproxy/revproxy-deploy.yaml
@@ -21,12 +21,13 @@ spec:
app: revproxy
# allow access from workspaces
userhelper: "yes"
+ internet: "yes"
GEN3_DATE_LABEL
spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -35,6 +36,22 @@ spec:
values:
- revproxy
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - spot
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - SPOT
automountServiceAccountToken: false
volumes:
- name: revproxy-conf
@@ -179,12 +196,12 @@ spec:
mountPath: "/usr/local/share/ca-certificates/cdis/cdis-ca.crt"
subPath: "ca.pem"
resources:
- requests:
+ requests:
cpu: 0.5
memory: 1024Mi
limits:
cpu: 1.0
- memory: 2048Mi
+ memory: 2048Mi
command: ["/bin/sh" ]
args:
- "-c"
diff --git a/kube/services/selenium/selenium-hub-deployment.yaml b/kube/services/selenium/selenium-hub-deployment.yaml
index 35ffe53c7..14f83fe48 100644
--- a/kube/services/selenium/selenium-hub-deployment.yaml
+++ b/kube/services/selenium/selenium-hub-deployment.yaml
@@ -17,6 +17,23 @@ spec:
annotations:
"cluster-autoscaler.kubernetes.io/safe-to-evict": "false"
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
containers:
- env:
- name: GRID_MAX_SESSION
diff --git a/kube/services/selenium/selenium-node-chrome-deployment.yaml b/kube/services/selenium/selenium-node-chrome-deployment.yaml
index 45a1fc231..d6b35f471 100644
--- a/kube/services/selenium/selenium-node-chrome-deployment.yaml
+++ b/kube/services/selenium/selenium-node-chrome-deployment.yaml
@@ -22,6 +22,23 @@ spec:
annotations:
"cluster-autoscaler.kubernetes.io/safe-to-evict": "false"
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
containers:
- env:
- name: SE_EVENT_BUS_HOST
diff --git a/kube/services/sftp/sftp-deploy.yaml b/kube/services/sftp/sftp-deploy.yaml
index 00ad4d8f2..3783c7871 100644
--- a/kube/services/sftp/sftp-deploy.yaml
+++ b/kube/services/sftp/sftp-deploy.yaml
@@ -15,6 +15,23 @@ spec:
app: sftp
GEN3_DATE_LABEL
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
automountServiceAccountToken: false
volumes:
- name: sftp-secret
diff --git a/kube/services/sheepdog/sheepdog-canary-deploy.yaml b/kube/services/sheepdog/sheepdog-canary-deploy.yaml
index f4568d97a..f80f73c63 100644
--- a/kube/services/sheepdog/sheepdog-canary-deploy.yaml
+++ b/kube/services/sheepdog/sheepdog-canary-deploy.yaml
@@ -26,7 +26,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -35,6 +35,22 @@ spec:
values:
- sheepdog
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - spot
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - SPOT
automountServiceAccountToken: false
volumes:
- name: config-volume
diff --git a/kube/services/sheepdog/sheepdog-deploy.yaml b/kube/services/sheepdog/sheepdog-deploy.yaml
index 9c0d7e18d..a260c8741 100644
--- a/kube/services/sheepdog/sheepdog-deploy.yaml
+++ b/kube/services/sheepdog/sheepdog-deploy.yaml
@@ -31,7 +31,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -40,6 +40,22 @@ spec:
values:
- sheepdog
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - spot
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - SPOT
automountServiceAccountToken: false
volumes:
- name: config-volume
@@ -182,8 +198,7 @@ spec:
imagePullPolicy: Always
resources:
requests:
- cpu: 0.8
- memory: 1024Mi
+ cpu: 100m
+ memory: 200Mi
limits:
- cpu: 2
- memory: 2048Mi
+ memory: 800Mi
diff --git a/kube/services/shiny/shiny-deploy.yaml b/kube/services/shiny/shiny-deploy.yaml
index c43c31409..55d795315 100644
--- a/kube/services/shiny/shiny-deploy.yaml
+++ b/kube/services/shiny/shiny-deploy.yaml
@@ -20,6 +20,23 @@ spec:
public: "yes"
GEN3_DATE_LABEL
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - spot
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - SPOT
volumes:
- name: config-volume
secret:
diff --git a/kube/services/sower/sower-deploy.yaml b/kube/services/sower/sower-deploy.yaml
index 3069ee31e..b66739d06 100644
--- a/kube/services/sower/sower-deploy.yaml
+++ b/kube/services/sower/sower-deploy.yaml
@@ -26,7 +26,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -35,6 +35,22 @@ spec:
values:
- sower
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: sower-service-account
volumes:
- name: sower-config
@@ -82,9 +98,8 @@ spec:
subPath: sower_config.json
resources:
requests:
- cpu: 0.4
- memory: 1200Mi
+ cpu: 100m
+ memory: 20Mi
limits:
- cpu: 1.0
- memory: 2400Mi
+ memory: 400Mi
\ No newline at end of file
diff --git a/kube/services/spark/spark-deploy.yaml b/kube/services/spark/spark-deploy.yaml
index ebc830be3..b280cecf0 100644
--- a/kube/services/spark/spark-deploy.yaml
+++ b/kube/services/spark/spark-deploy.yaml
@@ -25,7 +25,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -34,6 +34,22 @@ spec:
values:
- spark
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
automountServiceAccountToken: false
volumes:
containers:
@@ -64,9 +80,9 @@ spec:
volumeMounts:
imagePullPolicy: Always
resources:
- limits:
- cpu: 0.5
- memory: 2Gi
+ requests:
+ cpu: 3
+ memory: 4Gi
command: ["/bin/bash" ]
args:
- "-c"
diff --git a/kube/services/ssjdispatcher/ssjdispatcher-deploy.yaml b/kube/services/ssjdispatcher/ssjdispatcher-deploy.yaml
index ac2b3246f..554c60cb5 100644
--- a/kube/services/ssjdispatcher/ssjdispatcher-deploy.yaml
+++ b/kube/services/ssjdispatcher/ssjdispatcher-deploy.yaml
@@ -29,7 +29,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -38,6 +38,22 @@ spec:
values:
- ssjdispatcher
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
automountServiceAccountToken: true
volumes:
- name: ssjdispatcher-creds-volume
@@ -85,8 +101,7 @@ spec:
- containerPort: 8000
resources:
requests:
- cpu: 0.4
- memory: 512Mi
+ cpu: 100m
+ memory: 20Mi
limits:
- cpu: 1
- memory: 2400Mi
+ memory: 100Mi
diff --git a/kube/services/statsd-exporter/statsd-exporter-deploy.yaml b/kube/services/statsd-exporter/statsd-exporter-deploy.yaml
index b608cef28..9a1048f20 100644
--- a/kube/services/statsd-exporter/statsd-exporter-deploy.yaml
+++ b/kube/services/statsd-exporter/statsd-exporter-deploy.yaml
@@ -22,6 +22,23 @@ spec:
app: "statsd-exporter"
GEN3_DATE_LABEL
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
containers:
- name: "statsd-exporter"
GEN3_STATSD-EXPORTER_IMAGE|-image: prom/statsd-exporter:v0.15.0-|
diff --git a/kube/services/status-api/status-api-deploy.yaml b/kube/services/status-api/status-api-deploy.yaml
index 8c9c28775..c4bf542ab 100644
--- a/kube/services/status-api/status-api-deploy.yaml
+++ b/kube/services/status-api/status-api-deploy.yaml
@@ -19,6 +19,23 @@ spec:
app: status-api
netnolimit: "yes"
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
containers:
- name: status-api
image: "quay.io/cdis/status-dashboard:latest"
diff --git a/kube/services/superset/superset-deploy.yaml b/kube/services/superset/superset-deploy.yaml
index 72b9b88be..6a578eb33 100644
--- a/kube/services/superset/superset-deploy.yaml
+++ b/kube/services/superset/superset-deploy.yaml
@@ -22,19 +22,19 @@ metadata:
name: superset-config
labels:
app: superset
- chart: superset-0.7.1
+ chart: superset-0.9.2
release: "superset"
heritage: "Helm"
type: Opaque
stringData:
superset_config.py: |
-
+
import os
from cachelib.redis import RedisCache
-
+
def env(key, default=None):
return os.getenv(key, default)
-
+
MAPBOX_API_KEY = env('MAPBOX_API_KEY', '')
CACHE_CONFIG = {
'CACHE_TYPE': 'redis',
@@ -46,11 +46,11 @@ stringData:
'CACHE_REDIS_DB': env('REDIS_DB', 1),
}
DATA_CACHE_CONFIG = CACHE_CONFIG
-
+
SQLALCHEMY_DATABASE_URI = f"postgresql+psycopg2://{env('DB_USER')}:{env('DB_PASS')}@{env('DB_HOST')}:{env('DB_PORT')}/{env('DB_NAME')}"
SQLALCHEMY_TRACK_MODIFICATIONS = True
- SECRET_KEY = env('SECRET_KEY', '')
-
+ SECRET_KEY = env('SECRET_KEY', 'thisISaSECRET_1234')
+
# Flask-WTF flag for CSRF
WTF_CSRF_ENABLED = True
# Add endpoints that need to be exempt from CSRF protection
@@ -62,15 +62,15 @@ stringData:
CELERY_ANNOTATIONS = {'tasks.add': {'rate_limit': '10/s'}}
BROKER_URL = f"redis://{env('REDIS_HOST')}:{env('REDIS_PORT')}/0"
CELERY_RESULT_BACKEND = f"redis://{env('REDIS_HOST')}:{env('REDIS_PORT')}/0"
-
+
CELERY_CONFIG = CeleryConfig
RESULTS_BACKEND = RedisCache(
host=env('REDIS_HOST'),
port=env('REDIS_PORT'),
key_prefix='superset_results'
)
-
-
+
+
# Overrides
# enable_proxy_fix
# This will make sure the redirect_uri is properly computed, even with SSL offloading
@@ -131,8 +131,16 @@ stringData:
superset db upgrade
echo "Initializing roles..."
superset init
-
-
+
+ # echo "Creating admin user..."
+ # superset fab create-admin \
+ # --username admin \
+ # --firstname Superset \
+ # --lastname Admin \
+ # --email admin@superset.com \
+ # --password admin \
+ # || true
+
if [ -f "/app/configs/import_datasources.yaml" ]; then
echo "Importing database connections.... "
superset import_datasources -p /app/configs/import_datasources.yaml
@@ -171,10 +179,11 @@ metadata:
name: superset
labels:
app: superset
- chart: superset-0.7.1
+ chart: superset-0.9.2
release: superset
heritage: Helm
spec:
+# type: ClusterIP
type: NodePort
ports:
- port: 8088
@@ -208,7 +217,7 @@ metadata:
name: superset-worker
labels:
app: superset-worker
- chart: superset-0.7.1
+ chart: superset-0.9.2
release: superset
heritage: Helm
spec:
@@ -220,14 +229,14 @@ spec:
template:
metadata:
annotations:
- checksum/superset_config.py: 955c3e88940f522fe4d9ad60d105ab4537e290697d135703c8a01aeb6c1a3d8d
- checksum/connections: c44da43c5f3426c3c4a25f3235e3e23452ce1cf713ad059eaef7767e175a5eb4
+ checksum/superset_config.py: 441901105d53c640c7612da3d7b751dc6f770c1796e733ee79c9322d27cd1b5d
+ checksum/connections: a91716d6d1088e870fbe02159dc0b066dd011885aa08a22fbe60ea1cd4720f82
checksum/extraConfigs: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a
checksum/extraSecrets: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a
checksum/extraSecretEnv: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a
- checksum/configOverrides: 4b1ff4f862a95242ea509b5dc5f7d87c47faf1815de5ea21a46b3fde8e576bf4
+ checksum/configOverrides: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a
checksum/configOverridesFiles: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a
-
+
labels:
app: superset-worker
release: superset
@@ -235,23 +244,41 @@ spec:
dbsuperset: "yes"
public: "yes"
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
securityContext:
runAsUser: 0
initContainers:
- command:
- /bin/sh
- -c
- - until nc -zv $DB_HOST $DB_PORT -w1; do echo 'waiting for db'; sleep 1; done
+ - dockerize -wait "tcp://$DB_HOST:$DB_PORT" -wait "tcp://$REDIS_HOST:$REDIS_PORT"
+ -timeout 120s
envFrom:
- secretRef:
name: 'superset-env'
- image: 'busybox:1.35'
+ image: 'jwilder/dockerize:latest'
imagePullPolicy: 'IfNotPresent'
- name: wait-for-postgres
+ name: wait-for-postgres-redis
containers:
- name: superset
- image: "apache/superset:2.0.0"
- imagePullPolicy: Always
+ image: "apache/superset:2.1.0"
+ imagePullPolicy: IfNotPresent
command: ["/bin/sh","-c",". /app/pythonpath/superset_bootstrap.sh; celery --app=superset.tasks.celery_app:app worker"]
env:
- name: "SUPERSET_PORT"
@@ -263,6 +290,17 @@ spec:
- name: superset-config
mountPath: "/app/pythonpath"
readOnly: true
+ livenessProbe:
+ exec:
+ command:
+ - sh
+ - -c
+ - celery -A superset.tasks.celery_app:app inspect ping -d celery@$HOSTNAME
+ failureThreshold: 3
+ initialDelaySeconds: 120
+ periodSeconds: 60
+ successThreshold: 1
+ timeoutSeconds: 60
resources:
limits:
cpu: 1
@@ -295,7 +333,7 @@ metadata:
name: superset
labels:
app: superset
- chart: superset-0.7.1
+ chart: superset-0.9.2
release: superset
heritage: Helm
spec:
@@ -308,14 +346,14 @@ spec:
metadata:
annotations:
# Force reload on config changes
- checksum/superset_config.py: 955c3e88940f522fe4d9ad60d105ab4537e290697d135703c8a01aeb6c1a3d8d
- checksum/superset_init.sh: ff251d03d362c4a3ff1451d24893d5d12811f67edc84efa39484a84c59c3f883
- checksum/superset_bootstrap.sh: a6edf034118d68cef7203cc3181bb6c72b6244cdedf270ee4accc9ae9ff92b2e
- checksum/connections: c44da43c5f3426c3c4a25f3235e3e23452ce1cf713ad059eaef7767e175a5eb4
+ checksum/superset_config.py: 441901105d53c640c7612da3d7b751dc6f770c1796e733ee79c9322d27cd1b5d
+ checksum/superset_init.sh: e6b1e8eac1f7a79a07a6c72a0e2ee6e09654eeb439c6bbe61bfd676917c41e02
+ checksum/superset_bootstrap.sh: dc9a47141051ced34960c313860a55e03eb48c1fa36a0ed25c03ad60cd3b5c48
+ checksum/connections: a91716d6d1088e870fbe02159dc0b066dd011885aa08a22fbe60ea1cd4720f82
checksum/extraConfigs: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a
checksum/extraSecrets: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a
checksum/extraSecretEnv: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a
- checksum/configOverrides: 4b1ff4f862a95242ea509b5dc5f7d87c47faf1815de5ea21a46b3fde8e576bf4
+ checksum/configOverrides: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a
checksum/configOverridesFiles: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a
labels:
app: superset
@@ -330,17 +368,17 @@ spec:
- command:
- /bin/sh
- -c
- - until nc -zv $DB_HOST $DB_PORT -w1; do echo 'waiting for db'; sleep 1; done
+ - dockerize -wait "tcp://$DB_HOST:$DB_PORT" -timeout 120s
envFrom:
- secretRef:
name: 'superset-env'
- image: 'busybox:1.35'
+ image: 'jwilder/dockerize:latest'
imagePullPolicy: 'IfNotPresent'
name: wait-for-postgres
containers:
- name: superset
- image: "apache/superset:2.0.0"
- imagePullPolicy: Always
+ image: "apache/superset:2.1.0"
+ imagePullPolicy: IfNotPresent
command: ["/bin/sh","-c",". /app/pythonpath/superset_bootstrap.sh; /usr/bin/run-server.sh"]
env:
- name: "SUPERSET_PORT"
@@ -356,24 +394,33 @@ spec:
- name: http
containerPort: 8088
protocol: TCP
- livenessProbe:
+ startupProbe:
+ failureThreshold: 60
httpGet:
path: /health
port: http
initialDelaySeconds: 15
- timeoutSeconds: 1
- failureThreshold: 3
- periodSeconds: 15
+ periodSeconds: 5
successThreshold: 1
+ timeoutSeconds: 1
readinessProbe:
+ failureThreshold: 3
httpGet:
path: /health
port: http
initialDelaySeconds: 15
+ periodSeconds: 15
+ successThreshold: 1
timeoutSeconds: 1
+ livenessProbe:
failureThreshold: 3
+ httpGet:
+ path: /health
+ port: http
+ initialDelaySeconds: 15
periodSeconds: 15
successThreshold: 1
+ timeoutSeconds: 1
resources:
limits:
cpu: '0.25'
@@ -421,20 +468,20 @@ spec:
- command:
- /bin/sh
- -c
- - until nc -zv $DB_HOST $DB_PORT -w1; do echo 'waiting for db'; sleep 1; done
+ - dockerize -wait "tcp://$DB_HOST:$DB_PORT" -timeout 120s
envFrom:
- secretRef:
name: 'superset-env'
- image: 'busybox:1.35'
+ image: 'jwilder/dockerize:latest'
imagePullPolicy: 'IfNotPresent'
name: wait-for-postgres
containers:
- name: superset-init-db
- image: "apache/superset:2.0.0"
+ image: "apache/superset:2.1.0"
envFrom:
- secretRef:
name: superset-env
- imagePullPolicy: Always
+ imagePullPolicy: IfNotPresent
volumeMounts:
- name: superset-config
mountPath: "/app/pythonpath"
diff --git a/kube/services/superset/superset-redis.yaml b/kube/services/superset/superset-redis.yaml
index 875e3030b..bd1e6b064 100644
--- a/kube/services/superset/superset-redis.yaml
+++ b/kube/services/superset/superset-redis.yaml
@@ -4,10 +4,10 @@ apiVersion: v1
kind: ServiceAccount
automountServiceAccountToken: true
metadata:
- name: superset-redis
+ name: redis
labels:
app.kubernetes.io/name: redis
- helm.sh/chart: redis-16.3.1
+ helm.sh/chart: redis-17.9.4
app.kubernetes.io/instance: superset
app.kubernetes.io/managed-by: Helm
---
@@ -18,7 +18,7 @@ metadata:
name: superset-redis-configuration
labels:
app.kubernetes.io/name: redis
- helm.sh/chart: redis-16.3.1
+ helm.sh/chart: redis-17.9.4
app.kubernetes.io/instance: superset
app.kubernetes.io/managed-by: Helm
data:
@@ -37,7 +37,6 @@ data:
# End of master configuration
replica.conf: |-
dir /data
- slave-read-only yes
# User-supplied replica configuration:
rename-command FLUSHDB ""
rename-command FLUSHALL ""
@@ -50,7 +49,7 @@ metadata:
name: superset-redis-health
labels:
app.kubernetes.io/name: redis
- helm.sh/chart: redis-16.3.1
+ helm.sh/chart: redis-17.9.4
app.kubernetes.io/instance: superset
app.kubernetes.io/managed-by: Helm
data:
@@ -60,13 +59,13 @@ data:
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
response=$(
- timeout -s 3 $1 \
+ timeout -s 15 $1 \
redis-cli \
-h localhost \
-p $REDIS_PORT \
ping
)
- if [ $? == 124 ]; then
+ if [ "$?" -eq "124" ]; then
echo "Timed out"
exit 1
fi
@@ -80,13 +79,13 @@ data:
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
response=$(
- timeout -s 3 $1 \
+ timeout -s 15 $1 \
redis-cli \
-h localhost \
-p $REDIS_PORT \
ping
)
- if [ $? == 124 ]; then
+ if [ "$?" -eq "124" ]; then
echo "Timed out"
exit 1
fi
@@ -101,13 +100,13 @@ data:
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
[[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
response=$(
- timeout -s 3 $1 \
+ timeout -s 15 $1 \
redis-cli \
-h $REDIS_MASTER_HOST \
-p $REDIS_MASTER_PORT_NUMBER \
ping
)
- if [ $? == 124 ]; then
+ if [ "$?" -eq "124" ]; then
echo "Timed out"
exit 1
fi
@@ -121,13 +120,13 @@ data:
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
[[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
response=$(
- timeout -s 3 $1 \
+ timeout -s 15 $1 \
redis-cli \
-h $REDIS_MASTER_HOST \
-p $REDIS_MASTER_PORT_NUMBER \
ping
)
- if [ $? == 124 ]; then
+ if [ "$?" -eq "124" ]; then
echo "Timed out"
exit 1
fi
@@ -156,7 +155,7 @@ metadata:
name: superset-redis-scripts
labels:
app.kubernetes.io/name: redis
- helm.sh/chart: redis-16.3.1
+ helm.sh/chart: redis-17.9.4
app.kubernetes.io/instance: superset
app.kubernetes.io/managed-by: Helm
data:
@@ -164,10 +163,10 @@ data:
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
- if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then
+ if [[ -f /opt/bitnami/redis/mounted-etc/master.conf ]];then
cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf
fi
- if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
+ if [[ -f /opt/bitnami/redis/mounted-etc/redis.conf ]];then
cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
fi
ARGS=("--port" "${REDIS_PORT}")
@@ -183,7 +182,7 @@ metadata:
name: superset-redis-headless
labels:
app.kubernetes.io/name: redis
- helm.sh/chart: redis-16.3.1
+ helm.sh/chart: redis-17.9.4
app.kubernetes.io/instance: superset
app.kubernetes.io/managed-by: Helm
annotations:
@@ -208,13 +207,14 @@ metadata:
name: superset-redis-master
labels:
app.kubernetes.io/name: redis
- helm.sh/chart: redis-16.3.1
+ helm.sh/chart: redis-17.9.4
app.kubernetes.io/instance: superset
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: master
spec:
type: ClusterIP
-
+ internalTrafficPolicy: Cluster
+ sessionAffinity: None
ports:
- name: tcp-redis
port: 6379
@@ -225,7 +225,7 @@ spec:
app.kubernetes.io/instance: superset
app.kubernetes.io/component: master
---
-# Source: superset/charts/redis/templates/master/statefulset.yaml
+# Source: superset/charts/redis/templates/master/application.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
@@ -234,7 +234,7 @@ metadata:
name: superset-redis-master
labels:
app.kubernetes.io/name: redis
- helm.sh/chart: redis-16.3.1
+ helm.sh/chart: redis-17.9.4
app.kubernetes.io/instance: superset
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: master
@@ -247,7 +247,6 @@ spec:
app.kubernetes.io/component: master
serviceName: superset-redis-headless
updateStrategy:
- rollingUpdate: {}
type: RollingUpdate
template:
metadata:
@@ -255,23 +254,23 @@ spec:
app: superset-redis-master
dbomop-data: "yes"
app.kubernetes.io/name: redis
- helm.sh/chart: redis-16.3.1
+ helm.sh/chart: redis-17.9.4
app.kubernetes.io/instance: superset
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: master
annotations:
- checksum/configmap: 5fb78a3f5ce9ca1af5b7223f9cebe42f832ebc64f37e09a2fc8c8b29bb7101b0
- checksum/health: 2ea27c28e44af78b1d3dc1373aa2ac24ba2b215f788de4a0f0c9e02cbb79c533
- checksum/scripts: c351ebe638f6967b5bc76c2f38c28e2f7f65bc93846a1cd7786e2cbff9d51620
- checksum/secret: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
+ checksum/configmap: 3e37ce809cf97cf1904e4f06e78108dba34472423e16c5e416026bcb192895fb
+ checksum/health: ad98a9690e1f9c5784f1914c5b8e04b1ae2c1ddb7071d05acd6e7c7f0afa6e8f
+ checksum/scripts: 9b0e8fa5fffccc8a213cd402a29a8124753f879a5370299258cb762861c6fb8a
+ checksum/secret: 01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b
spec:
-
+
securityContext:
fsGroup: 1001
serviceAccountName: superset-redis
affinity:
podAffinity:
-
+
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- podAffinityTerm:
@@ -283,11 +282,11 @@ spec:
topologyKey: kubernetes.io/hostname
weight: 1
nodeAffinity:
-
+
terminationGracePeriodSeconds: 30
containers:
- name: redis
- image: docker.io/bitnami/redis:6.2.6-debian-10-r120
+ image: docker.io/bitnami/redis:7.0.10-debian-11-r4
imagePullPolicy: "IfNotPresent"
securityContext:
runAsUser: 1001
@@ -343,7 +342,6 @@ spec:
mountPath: /health
- name: redis-data
mountPath: /data
- subPath:
- name: config
mountPath: /opt/bitnami/redis/mounted-etc
- name: redis-tmp-conf
diff --git a/kube/services/superset/superset-secrets-template.yaml b/kube/services/superset/superset-secrets-template.yaml
index 8a3c7a2a6..774a63142 100644
--- a/kube/services/superset/superset-secrets-template.yaml
+++ b/kube/services/superset/superset-secrets-template.yaml
@@ -22,7 +22,7 @@ metadata:
name: superset-env
labels:
app: superset
- chart: superset-0.6.1
+ chart: superset-0.9.2
release: "superset"
heritage: "Helm"
type: Opaque
diff --git a/kube/services/thor/thor-deploy.yaml b/kube/services/thor/thor-deploy.yaml
index 00e57076c..46fc529af 100644
--- a/kube/services/thor/thor-deploy.yaml
+++ b/kube/services/thor/thor-deploy.yaml
@@ -22,7 +22,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -31,11 +31,29 @@ spec:
values:
- thor
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
automountServiceAccountToken: false
containers:
- name: thor
GEN3_THOR_IMAGE
env:
+ - name: AWS_DEFAULT_REGION
+ value: us-east-1
- name: RUNNING_IN_QAPLANETV1
value: "true"
- name: JENKINS_USERNAME
diff --git a/kube/services/tty/tty-deploy.yaml b/kube/services/tty/tty-deploy.yaml
index 302ac8ed9..138ee836b 100644
--- a/kube/services/tty/tty-deploy.yaml
+++ b/kube/services/tty/tty-deploy.yaml
@@ -26,7 +26,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -35,6 +35,22 @@ spec:
values:
- tty
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: tty-sa
securityContext:
fsGroup: 1000
diff --git a/kube/services/tube/tube-deploy.yaml b/kube/services/tube/tube-deploy.yaml
index 4eb45d434..6c6feae0f 100644
--- a/kube/services/tube/tube-deploy.yaml
+++ b/kube/services/tube/tube-deploy.yaml
@@ -26,7 +26,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -35,6 +35,22 @@ spec:
values:
- tube
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - spot
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - SPOT
automountServiceAccountToken: false
volumes:
- name: creds-volume
diff --git a/kube/services/workflow-age-monitor/application.yaml b/kube/services/workflow-age-monitor/application.yaml
new file mode 100644
index 000000000..99798bb2b
--- /dev/null
+++ b/kube/services/workflow-age-monitor/application.yaml
@@ -0,0 +1,22 @@
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+ name: argo-workflow-age-monitor-application
+ namespace: argocd
+spec:
+ destination:
+ namespace: default
+ server: https://kubernetes.default.svc
+ project: default
+ source:
+ repoURL: https://github.com/uc-cdis/cloud-automation.git
+ targetRevision: master
+ path: kube/services/workflow-age-monitor/
+ directory:
+ exclude: "application.yaml"
+ syncPolicy:
+ automated:
+ prune: true
+ selfHeal: true
+ syncOptions:
+ - CreateNamespace=true
diff --git a/kube/services/workflow-age-monitor/argo-workflow-age.yaml b/kube/services/workflow-age-monitor/argo-workflow-age.yaml
new file mode 100644
index 000000000..0d0c29115
--- /dev/null
+++ b/kube/services/workflow-age-monitor/argo-workflow-age.yaml
@@ -0,0 +1,55 @@
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+ name: argo-workflow-age
+ namespace: default
+spec:
+ schedule: "*/5 * * * *"
+ jobTemplate:
+ spec:
+ template:
+ metadata:
+ labels:
+ app: gen3job
+ spec:
+ serviceAccountName: argo-workflow-monitor
+ containers:
+ - name: kubectl
+ image: quay.io/cdis/awshelper
+ env:
+ # This is 3 * 3600, or 3 hours
+ - name: THRESHOLD_TIME
+ value: "10800"
+ - name: SLACK_WEBHOOK_URL
+ valueFrom:
+ configMapKeyRef:
+ name: global
+ key: slack_webhook
+
+ command: ["/bin/bash"]
+ args:
+ - "-c"
+ - |
+ #!/bin/bash
+ # Get all workflows across all namespaces and check their age
+ kubectl get workflows --all-namespaces -o json | jq -c '.items[] | {name: .metadata.name, creationTimestamp: .metadata.creationTimestamp}' | while read workflow_info; do
+ WORKFLOW_NAME=$(echo $workflow_info | jq -r '.name')
+ CREATION_TIMESTAMP=$(echo $workflow_info | jq -r '.creationTimestamp')
+
+ # Convert creation timestamp to Unix Epoch time
+ CREATION_EPOCH=$(date -d "$CREATION_TIMESTAMP" +%s)
+
+ # Get current Unix Epoch time
+ CURRENT_EPOCH=$(date +%s)
+
+ # Calculate workflow age in seconds
+ WORKFLOW_AGE=$(($CURRENT_EPOCH - $CREATION_EPOCH))
+
+ # Check if workflow age is greater than threshold
+ if [ "$WORKFLOW_AGE" -gt "$THRESHOLD_TIME" ]; then
+ echo "Workflow $WORKFLOW_NAME has been running for over $THRESHOLD_TIME seconds, sending an alert"
+ # Send alert to Slack
+ curl -X POST -H 'Content-type: application/json' --data "{\"text\":\"WARNING: Workflow \`${WORKFLOW_NAME}\` has been running longer than $THRESHOLD_TIME seconds\"}" $SLACK_WEBHOOK_URL
+ fi
+ done
+ restartPolicy: OnFailure
diff --git a/kube/services/workflow-age-monitor/auth.yaml b/kube/services/workflow-age-monitor/auth.yaml
new file mode 100644
index 000000000..fb7970a3e
--- /dev/null
+++ b/kube/services/workflow-age-monitor/auth.yaml
@@ -0,0 +1,18 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: argo-workflow-monitor
+ namespace: default
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: argo-workflow-monitor-binding
+subjects:
+ - kind: ServiceAccount
+ name: argo-workflow-monitor
+ namespace: default
+roleRef:
+ kind: ClusterRole
+ name: argo-argo-workflows-view
+ apiGroup: rbac.authorization.k8s.io
diff --git a/kube/services/ws-storage/ws-storage-deploy.yaml b/kube/services/ws-storage/ws-storage-deploy.yaml
index f7c50b721..f033e39a1 100644
--- a/kube/services/ws-storage/ws-storage-deploy.yaml
+++ b/kube/services/ws-storage/ws-storage-deploy.yaml
@@ -35,7 +35,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -44,6 +44,22 @@ spec:
values:
- ws-storage
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - spot
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - SPOT
volumes:
- name: config-volume
secret:
diff --git a/kube/services/wts/wts-deploy.yaml b/kube/services/wts/wts-deploy.yaml
index 81cd6199d..e54a9cfc4 100644
--- a/kube/services/wts/wts-deploy.yaml
+++ b/kube/services/wts/wts-deploy.yaml
@@ -33,7 +33,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -42,6 +42,22 @@ spec:
values:
- wts
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - spot
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - SPOT
terminationGracePeriodSeconds: 10
volumes:
- name: wts-secret
@@ -116,11 +132,10 @@ spec:
port: 80
resources:
requests:
- cpu: 0.8
- memory: 512Mi
+ cpu: 100m
+ memory: 200Mi
limits:
- cpu: 2
- memory: 2048Mi
+ memory: 512Mi
initContainers:
- name: wts-db-migrate
GEN3_WTS_IMAGE
diff --git a/package-lock.json b/package-lock.json
index f88a979b5..bd0b13589 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -1,14 +1,584 @@
{
"name": "@gen3/cloud-automation",
"version": "1.0.0",
- "lockfileVersion": 1,
+ "lockfileVersion": 2,
"requires": true,
- "dependencies": {
- "@fast-csv/format": {
+ "packages": {
+ "": {
+ "name": "@gen3/cloud-automation",
+ "version": "1.0.0",
+ "license": "Apache-2.0",
+ "dependencies": {
+ "ansi-regex": "^6.0.1",
+ "async": "^3.2.2",
+ "aws-sdk": "^2.814.0",
+ "elasticdump": "^6.84.1",
+ "express": "^4.19.2",
+ "json-schema": "^0.4.0",
+ "minimatch": "^3.0.5",
+ "minimist": "^1.2.6",
+ "requestretry": "^7.0.0"
+ },
+ "devDependencies": {}
+ },
+ "node_modules/accepts": {
+ "version": "1.3.8",
+ "license": "MIT",
+ "dependencies": {
+ "mime-types": "~2.1.34",
+ "negotiator": "0.6.3"
+ },
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/accepts/node_modules/negotiator": {
+ "version": "0.6.3",
+ "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz",
+ "integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/ansi-regex": {
+ "version": "6.0.1",
+ "license": "MIT",
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/ansi-regex?sponsor=1"
+ }
+ },
+ "node_modules/array-flatten": {
+ "version": "1.1.1",
+ "license": "MIT"
+ },
+ "node_modules/asn1": {
+ "version": "0.2.6",
+ "resolved": "https://registry.npmjs.org/asn1/-/asn1-0.2.6.tgz",
+ "integrity": "sha512-ix/FxPn0MDjeyJ7i/yoHGFt/EX6LyNbxSEhPPXODPL+KB0VPk86UYfL0lMdy+KCnv+fmvIzySwaK5COwqVbWTQ==",
+ "dependencies": {
+ "safer-buffer": "~2.1.0"
+ }
+ },
+ "node_modules/assert-plus": {
+ "version": "1.0.0",
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.8"
+ }
+ },
+ "node_modules/async": {
+ "version": "3.2.4",
+ "resolved": "https://registry.npmjs.org/async/-/async-3.2.4.tgz",
+ "integrity": "sha512-iAB+JbDEGXhyIUavoDl9WP/Jj106Kz9DEn1DPgYw5ruDn0e3Wgi3sKFm55sASdGBNOQB8F59d9qQ7deqrHA8wQ=="
+ },
+ "node_modules/asynckit": {
+ "version": "0.4.0",
+ "license": "MIT"
+ },
+ "node_modules/aws-sdk": {
+ "version": "2.1273.0",
+ "resolved": "https://registry.npmjs.org/aws-sdk/-/aws-sdk-2.1273.0.tgz",
+ "integrity": "sha512-QF37fm1DfUxjw+IJtDMTDBckVwAOf8EHQjs4NxJp5TtRkeqtWkxNzq/ViI8kAS+0n8JZaom8Oenmy8ufGfLMAQ==",
+ "dependencies": {
+ "buffer": "4.9.2",
+ "events": "1.1.1",
+ "ieee754": "1.1.13",
+ "jmespath": "0.16.0",
+ "querystring": "0.2.0",
+ "sax": "1.2.1",
+ "url": "0.10.3",
+ "util": "^0.12.4",
+ "uuid": "8.0.0",
+ "xml2js": "0.4.19"
+ },
+ "engines": {
+ "node": ">= 10.0.0"
+ }
+ },
+ "node_modules/aws-sign2": {
+ "version": "0.7.0",
+ "license": "Apache-2.0",
+ "engines": {
+ "node": "*"
+ }
+ },
+ "node_modules/aws4": {
+ "version": "1.11.0",
+ "license": "MIT"
+ },
+ "node_modules/balanced-match": {
+ "version": "1.0.2",
+ "license": "MIT"
+ },
+ "node_modules/base64-js": {
+ "version": "1.5.1",
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/feross"
+ },
+ {
+ "type": "patreon",
+ "url": "https://www.patreon.com/feross"
+ },
+ {
+ "type": "consulting",
+ "url": "https://feross.org/support"
+ }
+ ],
+ "license": "MIT"
+ },
+ "node_modules/bcrypt-pbkdf": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz",
+ "integrity": "sha512-qeFIXtP4MSoi6NLqO12WfqARWWuCKi2Rn/9hJLEmtB5yTNr9DqFWkJRCf2qShWzPeAMRnOgCrq0sg/KLv5ES9w==",
+ "dependencies": {
+ "tweetnacl": "^0.14.3"
+ }
+ },
+ "node_modules/big.js": {
+ "version": "5.2.2",
+ "license": "MIT",
+ "engines": {
+ "node": "*"
+ }
+ },
+ "node_modules/body-parser": {
+ "version": "1.20.2",
+ "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.2.tgz",
+ "integrity": "sha512-ml9pReCu3M61kGlqoTm2umSXTlRTuGTx0bfYj+uIUKKYycG5NtSbeetV3faSU6R7ajOPw0g/J1PvK4qNy7s5bA==",
+ "dependencies": {
+ "bytes": "3.1.2",
+ "content-type": "~1.0.5",
+ "debug": "2.6.9",
+ "depd": "2.0.0",
+ "destroy": "1.2.0",
+ "http-errors": "2.0.0",
+ "iconv-lite": "0.4.24",
+ "on-finished": "2.4.1",
+ "qs": "6.11.0",
+ "raw-body": "2.5.2",
+ "type-is": "~1.6.18",
+ "unpipe": "1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.8",
+ "npm": "1.2.8000 || >= 1.4.16"
+ }
+ },
+ "node_modules/brace-expansion": {
+ "version": "1.1.11",
+ "license": "MIT",
+ "dependencies": {
+ "balanced-match": "^1.0.0",
+ "concat-map": "0.0.1"
+ }
+ },
+ "node_modules/buffer": {
+ "version": "4.9.2",
+ "resolved": "https://registry.npmjs.org/buffer/-/buffer-4.9.2.tgz",
+ "integrity": "sha512-xq+q3SRMOxGivLhBNaUdC64hDTQwejJ+H0T/NB1XMtTVEwNTrfFF3gAxiyW0Bu/xWEGhjVKgUcMhCrUy2+uCWg==",
+ "dependencies": {
+ "base64-js": "^1.0.2",
+ "ieee754": "^1.1.4",
+ "isarray": "^1.0.0"
+ }
+ },
+ "node_modules/buffer-queue": {
+ "version": "1.0.0",
+ "license": "MIT"
+ },
+ "node_modules/bytes": {
+ "version": "3.1.2",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/call-bind": {
+ "version": "1.0.7",
+ "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.7.tgz",
+ "integrity": "sha512-GHTSNSYICQ7scH7sZ+M2rFopRoLh8t2bLSW6BbgrtLsahOIB5iyAVJf9GjWK3cYTDaMj4XdBpM1cA6pIS0Kv2w==",
+ "dependencies": {
+ "es-define-property": "^1.0.0",
+ "es-errors": "^1.3.0",
+ "function-bind": "^1.1.2",
+ "get-intrinsic": "^1.2.4",
+ "set-function-length": "^1.2.1"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/call-bind/node_modules/get-intrinsic": {
+ "version": "1.2.4",
+ "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz",
+ "integrity": "sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==",
+ "dependencies": {
+ "es-errors": "^1.3.0",
+ "function-bind": "^1.1.2",
+ "has-proto": "^1.0.1",
+ "has-symbols": "^1.0.3",
+ "hasown": "^2.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/caseless": {
+ "version": "0.12.0",
+ "license": "Apache-2.0"
+ },
+ "node_modules/combined-stream": {
+ "version": "1.0.8",
+ "license": "MIT",
+ "dependencies": {
+ "delayed-stream": "~1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/concat-map": {
+ "version": "0.0.1",
+ "license": "MIT"
+ },
+ "node_modules/content-disposition": {
+ "version": "0.5.4",
+ "license": "MIT",
+ "dependencies": {
+ "safe-buffer": "5.2.1"
+ },
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/content-type": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz",
+ "integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/cookie": {
+ "version": "0.6.0",
+ "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.6.0.tgz",
+ "integrity": "sha512-U71cyTamuh1CRNCfpGY6to28lxvNwPG4Guz/EVjgf3Jmzv0vlDp1atT9eS5dDjMYHucpHbWns6Lwf3BKz6svdw==",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/cookie-signature": {
+ "version": "1.0.6",
+ "license": "MIT"
+ },
+ "node_modules/core-util-is": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.3.tgz",
+ "integrity": "sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ=="
+ },
+ "node_modules/dashdash": {
+ "version": "1.14.1",
+ "resolved": "https://registry.npmjs.org/dashdash/-/dashdash-1.14.1.tgz",
+ "integrity": "sha512-jRFi8UDGo6j+odZiEpjazZaWqEal3w/basFjQHQEwVtZJGDpxbH1MeYluwCS8Xq5wmLJooDlMgvVarmWfGM44g==",
+ "dependencies": {
+ "assert-plus": "^1.0.0"
+ },
+ "engines": {
+ "node": ">=0.10"
+ }
+ },
+ "node_modules/debug": {
+ "version": "2.6.9",
+ "license": "MIT",
+ "dependencies": {
+ "ms": "2.0.0"
+ }
+ },
+ "node_modules/define-data-property": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz",
+ "integrity": "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==",
+ "dependencies": {
+ "es-define-property": "^1.0.0",
+ "es-errors": "^1.3.0",
+ "gopd": "^1.0.1"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/delay": {
+ "version": "5.0.0",
+ "license": "MIT",
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/delayed-stream": {
+ "version": "1.0.0",
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.4.0"
+ }
+ },
+ "node_modules/depd": {
+ "version": "2.0.0",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/destroy": {
+ "version": "1.2.0",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.8",
+ "npm": "1.2.8000 || >= 1.4.16"
+ }
+ },
+ "node_modules/ecc-jsbn": {
+ "version": "0.1.2",
+ "resolved": "https://registry.npmjs.org/ecc-jsbn/-/ecc-jsbn-0.1.2.tgz",
+ "integrity": "sha512-eh9O+hwRHNbG4BLTjEl3nw044CkGm5X6LoaCf7LPp7UU8Qrt47JYNi6nPX8xjW97TKGKm1ouctg0QSpZe9qrnw==",
+ "dependencies": {
+ "jsbn": "~0.1.0",
+ "safer-buffer": "^2.1.0"
+ }
+ },
+ "node_modules/ee-first": {
+ "version": "1.1.1",
+ "license": "MIT"
+ },
+ "node_modules/elasticdump": {
+ "version": "6.94.1",
+ "resolved": "https://registry.npmjs.org/elasticdump/-/elasticdump-6.94.1.tgz",
+ "integrity": "sha512-VThINQBW1MG7k7oVGndPBXCL6cFSfByu2EZo0gch9l7voyv1FfxyrIp9cZ5Ft9Vwygjh7sXSomnWaQ+qzmkfKA==",
+ "dependencies": {
+ "async": "^2.6.4",
+ "aws-sdk": "2.1122.0",
+ "aws4": "^1.11.0",
+ "big.js": "^5.2.2",
+ "bytes": "^3.1.2",
+ "delay": "^5.0.0",
+ "extends-classes": "1.0.5",
+ "fast-csv": "4.3.6",
+ "http-status": "^1.5.1",
+ "ini": "^2.0.0",
+ "JSONStream": "^1.3.5",
+ "lodash": "^4.17.21",
+ "lossless-json": "^1.0.5",
+ "minimist": "^1.2.6",
+ "p-queue": "^6.6.2",
+ "request": "2.88.2",
+ "requestretry": "^7.1.0",
+ "s3-stream-upload": "2.0.2",
+ "s3urls": "^1.5.2",
+ "semver": "5.7.1",
+ "socks5-http-client": "^1.0.4",
+ "socks5-https-client": "^1.2.1"
+ },
+ "bin": {
+ "elasticdump": "bin/elasticdump",
+ "multielasticdump": "bin/multielasticdump"
+ },
+ "engines": {
+ "node": ">=10.0.0"
+ }
+ },
+ "node_modules/elasticdump/node_modules/async": {
+ "version": "2.6.4",
+ "resolved": "https://registry.npmjs.org/async/-/async-2.6.4.tgz",
+ "integrity": "sha512-mzo5dfJYwAn29PeiJ0zvwTo04zj8HDJj0Mn8TD7sno7q12prdbnasKJHhkm2c1LgrhlJ0teaea8860oxi51mGA==",
+ "dependencies": {
+ "lodash": "^4.17.14"
+ }
+ },
+ "node_modules/elasticdump/node_modules/aws-sdk": {
+ "version": "2.1122.0",
+ "resolved": "https://registry.npmjs.org/aws-sdk/-/aws-sdk-2.1122.0.tgz",
+ "integrity": "sha512-545VawhsCQ7yEx9jZKV0hTTW3FS/waycISWMvnNwqRfpU9o4FQ4DSu3je7ekn5yFKM+91dxJC+IfJgtIV8WaUw==",
+ "dependencies": {
+ "buffer": "4.9.2",
+ "events": "1.1.1",
+ "ieee754": "1.1.13",
+ "jmespath": "0.16.0",
+ "querystring": "0.2.0",
+ "sax": "1.2.1",
+ "url": "0.10.3",
+ "uuid": "3.3.2",
+ "xml2js": "0.4.19"
+ },
+ "engines": {
+ "node": ">= 10.0.0"
+ }
+ },
+ "node_modules/elasticdump/node_modules/uuid": {
+ "version": "3.3.2",
+ "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.3.2.tgz",
+ "integrity": "sha512-yXJmeNaw3DnnKAOKJE51sL/ZaYfWJRl1pK9dr19YFCu0ObS231AB1/LbqTKRAQ5kw8A90rA6fr4riOUpTZvQZA==",
+ "deprecated": "Please upgrade to version 7 or higher. Older versions may use Math.random() in certain circumstances, which is known to be problematic. See https://v8.dev/blog/math-random for details.",
+ "bin": {
+ "uuid": "bin/uuid"
+ }
+ },
+ "node_modules/encodeurl": {
+ "version": "1.0.2",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/es-define-property": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.0.tgz",
+ "integrity": "sha512-jxayLKShrEqqzJ0eumQbVhTYQM27CfT1T35+gCgDFoL82JLsXqTJ76zv6A0YLOgEnLUMvLzsDsGIrl8NFpT2gQ==",
+ "dependencies": {
+ "get-intrinsic": "^1.2.4"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/es-define-property/node_modules/get-intrinsic": {
+ "version": "1.2.4",
+ "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz",
+ "integrity": "sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==",
+ "dependencies": {
+ "es-errors": "^1.3.0",
+ "function-bind": "^1.1.2",
+ "has-proto": "^1.0.1",
+ "has-symbols": "^1.0.3",
+ "hasown": "^2.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/es-errors": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz",
+ "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==",
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/escape-html": {
+ "version": "1.0.3",
+ "license": "MIT"
+ },
+ "node_modules/etag": {
+ "version": "1.8.1",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/events": {
+ "version": "1.1.1",
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.4.x"
+ }
+ },
+ "node_modules/express": {
+ "version": "4.19.2",
+ "resolved": "https://registry.npmjs.org/express/-/express-4.19.2.tgz",
+ "integrity": "sha512-5T6nhjsT+EOMzuck8JjBHARTHfMht0POzlA60WV2pMD3gyXw2LZnZ+ueGdNxG+0calOJcWKbpFcuzLZ91YWq9Q==",
+ "dependencies": {
+ "accepts": "~1.3.8",
+ "array-flatten": "1.1.1",
+ "body-parser": "1.20.2",
+ "content-disposition": "0.5.4",
+ "content-type": "~1.0.4",
+ "cookie": "0.6.0",
+ "cookie-signature": "1.0.6",
+ "debug": "2.6.9",
+ "depd": "2.0.0",
+ "encodeurl": "~1.0.2",
+ "escape-html": "~1.0.3",
+ "etag": "~1.8.1",
+ "finalhandler": "1.2.0",
+ "fresh": "0.5.2",
+ "http-errors": "2.0.0",
+ "merge-descriptors": "1.0.1",
+ "methods": "~1.1.2",
+ "on-finished": "2.4.1",
+ "parseurl": "~1.3.3",
+ "path-to-regexp": "0.1.7",
+ "proxy-addr": "~2.0.7",
+ "qs": "6.11.0",
+ "range-parser": "~1.2.1",
+ "safe-buffer": "5.2.1",
+ "send": "0.18.0",
+ "serve-static": "1.15.0",
+ "setprototypeof": "1.2.0",
+ "statuses": "2.0.1",
+ "type-is": "~1.6.18",
+ "utils-merge": "1.0.1",
+ "vary": "~1.1.2"
+ },
+ "engines": {
+ "node": ">= 0.10.0"
+ }
+ },
+ "node_modules/extend": {
+ "version": "3.0.2",
+ "license": "MIT"
+ },
+ "node_modules/extends-classes": {
+ "version": "1.0.5",
+ "license": "MIT",
+ "dependencies": {
+ "method-missing": "^1.1.2"
+ },
+ "engines": {
+ "node": ">=6.0.0"
+ }
+ },
+ "node_modules/extsprintf": {
+ "version": "1.3.0",
+ "engines": [
+ "node >=0.6.0"
+ ],
+ "license": "MIT"
+ },
+ "node_modules/fast-csv": {
+ "version": "4.3.6",
+ "resolved": "https://registry.npmjs.org/fast-csv/-/fast-csv-4.3.6.tgz",
+ "integrity": "sha512-2RNSpuwwsJGP0frGsOmTb9oUF+VkFSM4SyLTDgwf2ciHWTarN0lQTC+F2f/t5J9QjW+c65VFIAAu85GsvMIusw==",
+ "dependencies": {
+ "@fast-csv/format": "4.3.5",
+ "@fast-csv/parse": "4.3.6"
+ },
+ "engines": {
+ "node": ">=10.0.0"
+ }
+ },
+ "node_modules/fast-csv/node_modules/@fast-csv/format": {
"version": "4.3.5",
"resolved": "https://registry.npmjs.org/@fast-csv/format/-/format-4.3.5.tgz",
"integrity": "sha512-8iRn6QF3I8Ak78lNAa+Gdl5MJJBM5vRHivFtMRUWINdevNo00K7OXxS2PshawLKTejVwieIlPmK5YlLu6w4u8A==",
- "requires": {
+ "dependencies": {
"@types/node": "^14.0.1",
"lodash.escaperegexp": "^4.1.2",
"lodash.isboolean": "^3.0.3",
@@ -17,63 +587,1241 @@
"lodash.isnil": "^4.0.0"
}
},
- "@fast-csv/parse": {
- "version": "4.3.6",
- "resolved": "https://registry.npmjs.org/@fast-csv/parse/-/parse-4.3.6.tgz",
- "integrity": "sha512-uRsLYksqpbDmWaSmzvJcuApSEe38+6NQZBUsuAyMZKqHxH0g1wcJgsKUvN3WC8tewaqFjBMMGrkHmC+T7k8LvA==",
- "requires": {
- "@types/node": "^14.0.1",
- "lodash.escaperegexp": "^4.1.2",
- "lodash.groupby": "^4.6.0",
- "lodash.isfunction": "^3.0.9",
- "lodash.isnil": "^4.0.0",
- "lodash.isundefined": "^3.0.1",
- "lodash.uniq": "^4.5.0"
+ "node_modules/fast-csv/node_modules/@fast-csv/parse": {
+ "version": "4.3.6",
+ "resolved": "https://registry.npmjs.org/@fast-csv/parse/-/parse-4.3.6.tgz",
+ "integrity": "sha512-uRsLYksqpbDmWaSmzvJcuApSEe38+6NQZBUsuAyMZKqHxH0g1wcJgsKUvN3WC8tewaqFjBMMGrkHmC+T7k8LvA==",
+ "dependencies": {
+ "@types/node": "^14.0.1",
+ "lodash.escaperegexp": "^4.1.2",
+ "lodash.groupby": "^4.6.0",
+ "lodash.isfunction": "^3.0.9",
+ "lodash.isnil": "^4.0.0",
+ "lodash.isundefined": "^3.0.1",
+ "lodash.uniq": "^4.5.0"
+ }
+ },
+ "node_modules/fast-csv/node_modules/@types/node": {
+ "version": "14.18.34",
+ "resolved": "https://registry.npmjs.org/@types/node/-/node-14.18.34.tgz",
+ "integrity": "sha512-hcU9AIQVHmPnmjRK+XUUYlILlr9pQrsqSrwov/JK1pnf3GTQowVBhx54FbvM0AU/VXGH4i3+vgXS5EguR7fysA=="
+ },
+ "node_modules/fast-deep-equal": {
+ "version": "3.1.3",
+ "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz",
+ "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q=="
+ },
+ "node_modules/fast-json-stable-stringify": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz",
+ "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw=="
+ },
+ "node_modules/finalhandler": {
+ "version": "1.2.0",
+ "license": "MIT",
+ "dependencies": {
+ "debug": "2.6.9",
+ "encodeurl": "~1.0.2",
+ "escape-html": "~1.0.3",
+ "on-finished": "2.4.1",
+ "parseurl": "~1.3.3",
+ "statuses": "2.0.1",
+ "unpipe": "~1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/for-each": {
+ "version": "0.3.3",
+ "license": "MIT",
+ "dependencies": {
+ "is-callable": "^1.1.3"
+ }
+ },
+ "node_modules/for-each/node_modules/is-callable": {
+ "version": "1.2.7",
+ "resolved": "https://registry.npmjs.org/is-callable/-/is-callable-1.2.7.tgz",
+ "integrity": "sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA==",
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/forever-agent": {
+ "version": "0.6.1",
+ "license": "Apache-2.0",
+ "engines": {
+ "node": "*"
+ }
+ },
+ "node_modules/form-data": {
+ "version": "2.3.3",
+ "license": "MIT",
+ "dependencies": {
+ "asynckit": "^0.4.0",
+ "combined-stream": "^1.0.6",
+ "mime-types": "^2.1.12"
+ },
+ "engines": {
+ "node": ">= 0.12"
+ }
+ },
+ "node_modules/forwarded": {
+ "version": "0.2.0",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/fresh": {
+ "version": "0.5.2",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/function-bind": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz",
+ "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==",
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/getpass": {
+ "version": "0.1.7",
+ "resolved": "https://registry.npmjs.org/getpass/-/getpass-0.1.7.tgz",
+ "integrity": "sha512-0fzj9JxOLfJ+XGLhR8ze3unN0KZCgZwiSSDz168VERjK8Wl8kVSdcu2kspd4s4wtAa1y/qrVRiAA0WclVsu0ng==",
+ "dependencies": {
+ "assert-plus": "^1.0.0"
+ }
+ },
+ "node_modules/gopd": {
+ "version": "1.0.1",
+ "license": "MIT",
+ "dependencies": {
+ "get-intrinsic": "^1.1.3"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/gopd/node_modules/get-intrinsic": {
+ "version": "1.2.4",
+ "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz",
+ "integrity": "sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==",
+ "dependencies": {
+ "es-errors": "^1.3.0",
+ "function-bind": "^1.1.2",
+ "has-proto": "^1.0.1",
+ "has-symbols": "^1.0.3",
+ "hasown": "^2.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/har-schema": {
+ "version": "2.0.0",
+ "license": "ISC",
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/har-validator": {
+ "version": "5.1.5",
+ "license": "MIT",
+ "dependencies": {
+ "ajv": "^6.12.3",
+ "har-schema": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/har-validator/node_modules/ajv": {
+ "version": "6.12.6",
+ "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz",
+ "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==",
+ "dependencies": {
+ "fast-deep-equal": "^3.1.1",
+ "fast-json-stable-stringify": "^2.0.0",
+ "json-schema-traverse": "^0.4.1",
+ "uri-js": "^4.2.2"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/epoberezkin"
+ }
+ },
+ "node_modules/has-property-descriptors": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz",
+ "integrity": "sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==",
+ "dependencies": {
+ "es-define-property": "^1.0.0"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/has-proto": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.0.3.tgz",
+ "integrity": "sha512-SJ1amZAJUiZS+PhsVLf5tGydlaVB8EdFpaSO4gmiUKUOxk8qzn5AIy4ZeJUmh22znIdk/uMAUT2pl3FxzVUH+Q==",
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/has-symbols": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz",
+ "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==",
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/hasown": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz",
+ "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==",
+ "dependencies": {
+ "function-bind": "^1.1.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/http-errors": {
+ "version": "2.0.0",
+ "license": "MIT",
+ "dependencies": {
+ "depd": "2.0.0",
+ "inherits": "2.0.4",
+ "setprototypeof": "1.2.0",
+ "statuses": "2.0.1",
+ "toidentifier": "1.0.1"
+ },
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/http-signature": {
+ "version": "1.2.0",
+ "license": "MIT",
+ "dependencies": {
+ "assert-plus": "^1.0.0",
+ "jsprim": "^1.2.2",
+ "sshpk": "^1.7.0"
+ },
+ "engines": {
+ "node": ">=0.8",
+ "npm": ">=1.3.7"
+ }
+ },
+ "node_modules/http-signature/node_modules/sshpk": {
+ "version": "1.18.0",
+ "resolved": "https://registry.npmjs.org/sshpk/-/sshpk-1.18.0.tgz",
+ "integrity": "sha512-2p2KJZTSqQ/I3+HX42EpYOa2l3f8Erv8MWKsy2I9uf4wA7yFIkXRffYdsx86y6z4vHtV8u7g+pPlr8/4ouAxsQ==",
+ "dependencies": {
+ "asn1": "~0.2.3",
+ "assert-plus": "^1.0.0",
+ "bcrypt-pbkdf": "^1.0.0",
+ "dashdash": "^1.12.0",
+ "ecc-jsbn": "~0.1.1",
+ "getpass": "^0.1.1",
+ "jsbn": "~0.1.0",
+ "safer-buffer": "^2.0.2",
+ "tweetnacl": "~0.14.0"
+ },
+ "bin": {
+ "sshpk-conv": "bin/sshpk-conv",
+ "sshpk-sign": "bin/sshpk-sign",
+ "sshpk-verify": "bin/sshpk-verify"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/http-status": {
+ "version": "1.5.3",
+ "resolved": "https://registry.npmjs.org/http-status/-/http-status-1.5.3.tgz",
+ "integrity": "sha512-jCClqdnnwigYslmtfb28vPplOgoiZ0siP2Z8C5Ua+3UKbx410v+c+jT+jh1bbI4TvcEySuX0vd/CfFZFbDkJeQ==",
+ "engines": {
+ "node": ">= 0.4.0"
+ }
+ },
+ "node_modules/iconv-lite": {
+ "version": "0.4.24",
+ "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz",
+ "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==",
+ "dependencies": {
+ "safer-buffer": ">= 2.1.2 < 3"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/ieee754": {
+ "version": "1.1.13",
+ "license": "BSD-3-Clause"
+ },
+ "node_modules/inherits": {
+ "version": "2.0.4",
+ "license": "ISC"
+ },
+ "node_modules/ini": {
+ "version": "2.0.0",
+ "license": "ISC",
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/ipaddr.js": {
+ "version": "1.9.1",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.10"
+ }
+ },
+ "node_modules/is-typedarray": {
+ "version": "1.0.0",
+ "license": "MIT"
+ },
+ "node_modules/isarray": {
+ "version": "1.0.0",
+ "license": "MIT"
+ },
+ "node_modules/isstream": {
+ "version": "0.1.2",
+ "license": "MIT"
+ },
+ "node_modules/jmespath": {
+ "version": "0.16.0",
+ "resolved": "https://registry.npmjs.org/jmespath/-/jmespath-0.16.0.tgz",
+ "integrity": "sha512-9FzQjJ7MATs1tSpnco1K6ayiYE3figslrXA72G2HQ/n76RzvYlofyi5QM+iX4YRs/pu3yzxlVQSST23+dMDknw==",
+ "engines": {
+ "node": ">= 0.6.0"
+ }
+ },
+ "node_modules/jsbn": {
+ "version": "0.1.1",
+ "resolved": "https://registry.npmjs.org/jsbn/-/jsbn-0.1.1.tgz",
+ "integrity": "sha512-UVU9dibq2JcFWxQPA6KCqj5O42VOmAY3zQUfEKxU0KpTGXwNoCjkX1e13eHNvw/xPynt6pU0rZ1htjWTNTSXsg=="
+ },
+ "node_modules/json-schema": {
+ "version": "0.4.0",
+ "resolved": "https://registry.npmjs.org/json-schema/-/json-schema-0.4.0.tgz",
+ "integrity": "sha512-es94M3nTIfsEPisRafak+HDLfHXnKBhV3vU5eqPcS3flIWqcxJWgXHXiey3YrpaNsanY5ei1VoYEbOzijuq9BA=="
+ },
+ "node_modules/json-schema-traverse": {
+ "version": "0.4.1",
+ "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz",
+ "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg=="
+ },
+ "node_modules/json-stringify-safe": {
+ "version": "5.0.1",
+ "license": "ISC"
+ },
+ "node_modules/jsonparse": {
+ "version": "1.3.1",
+ "engines": [
+ "node >= 0.2.0"
+ ],
+ "license": "MIT"
+ },
+ "node_modules/JSONStream": {
+ "version": "1.3.5",
+ "license": "(MIT OR Apache-2.0)",
+ "dependencies": {
+ "jsonparse": "^1.2.0",
+ "through": ">=2.2.7 <3"
+ },
+ "bin": {
+ "JSONStream": "bin.js"
+ },
+ "engines": {
+ "node": "*"
+ }
+ },
+ "node_modules/jsprim": {
+ "version": "1.4.2",
+ "license": "MIT",
+ "dependencies": {
+ "assert-plus": "1.0.0",
+ "extsprintf": "1.3.0",
+ "json-schema": "0.4.0",
+ "verror": "1.10.0"
+ },
+ "engines": {
+ "node": ">=0.6.0"
+ }
+ },
+ "node_modules/lodash": {
+ "version": "4.17.21",
+ "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz",
+ "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg=="
+ },
+ "node_modules/lodash.escaperegexp": {
+ "version": "4.1.2",
+ "license": "MIT"
+ },
+ "node_modules/lodash.groupby": {
+ "version": "4.6.0",
+ "license": "MIT"
+ },
+ "node_modules/lodash.isboolean": {
+ "version": "3.0.3",
+ "license": "MIT"
+ },
+ "node_modules/lodash.isequal": {
+ "version": "4.5.0",
+ "license": "MIT"
+ },
+ "node_modules/lodash.isfunction": {
+ "version": "3.0.9",
+ "license": "MIT"
+ },
+ "node_modules/lodash.isnil": {
+ "version": "4.0.0",
+ "license": "MIT"
+ },
+ "node_modules/lodash.isundefined": {
+ "version": "3.0.1",
+ "license": "MIT"
+ },
+ "node_modules/lodash.uniq": {
+ "version": "4.5.0",
+ "license": "MIT"
+ },
+ "node_modules/lossless-json": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/lossless-json/-/lossless-json-1.0.5.tgz",
+ "integrity": "sha512-RicKUuLwZVNZ6ZdJHgIZnSeA05p8qWc5NW0uR96mpPIjN9WDLUg9+kj1esQU1GkPn9iLZVKatSQK5gyiaFHgJA=="
+ },
+ "node_modules/media-typer": {
+ "version": "0.3.0",
+ "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz",
+ "integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/merge-descriptors": {
+ "version": "1.0.1",
+ "license": "MIT"
+ },
+ "node_modules/method-missing": {
+ "version": "1.2.4",
+ "license": "MIT",
+ "engines": {
+ "node": ">=6.0.0"
+ }
+ },
+ "node_modules/methods": {
+ "version": "1.1.2",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/mime": {
+ "version": "1.6.0",
+ "license": "MIT",
+ "bin": {
+ "mime": "cli.js"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/mime-types": {
+ "version": "2.1.35",
+ "license": "MIT",
+ "dependencies": {
+ "mime-db": "1.52.0"
+ },
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/mime-types/node_modules/mime-db": {
+ "version": "1.52.0",
+ "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz",
+ "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/minimatch": {
+ "version": "3.1.2",
+ "license": "ISC",
+ "dependencies": {
+ "brace-expansion": "^1.1.7"
+ },
+ "engines": {
+ "node": "*"
+ }
+ },
+ "node_modules/minimist": {
+ "version": "1.2.7",
+ "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.7.tgz",
+ "integrity": "sha512-bzfL1YUZsP41gmu/qjrEk0Q6i2ix/cVeAhbCbqH9u3zYutS1cLg00qhrD0M2MVdCcx4Sc0UpP2eBWo9rotpq6g==",
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/ms": {
+ "version": "2.0.0",
+ "license": "MIT"
+ },
+ "node_modules/oauth-sign": {
+ "version": "0.9.0",
+ "license": "Apache-2.0",
+ "engines": {
+ "node": "*"
+ }
+ },
+ "node_modules/object-inspect": {
+ "version": "1.13.1",
+ "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.1.tgz",
+ "integrity": "sha512-5qoj1RUiKOMsCCNLV1CBiPYE10sziTsnmNxkAI/rZhiD63CF7IqdFGC/XzjWjpSgLf0LxXX3bDFIh0E18f6UhQ==",
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/on-finished": {
+ "version": "2.4.1",
+ "license": "MIT",
+ "dependencies": {
+ "ee-first": "1.1.1"
+ },
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/p-finally": {
+ "version": "1.0.0",
+ "license": "MIT",
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/p-queue": {
+ "version": "6.6.2",
+ "resolved": "https://registry.npmjs.org/p-queue/-/p-queue-6.6.2.tgz",
+ "integrity": "sha512-RwFpb72c/BhQLEXIZ5K2e+AhgNVmIejGlTgiB9MzZ0e93GRvqZ7uSi0dvRF7/XIXDeNkra2fNHBxTyPDGySpjQ==",
+ "dependencies": {
+ "eventemitter3": "^4.0.4",
+ "p-timeout": "^3.2.0"
+ },
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/p-queue/node_modules/eventemitter3": {
+ "version": "4.0.7",
+ "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.7.tgz",
+ "integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw=="
+ },
+ "node_modules/p-timeout": {
+ "version": "3.2.0",
+ "license": "MIT",
+ "dependencies": {
+ "p-finally": "^1.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/parseurl": {
+ "version": "1.3.3",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/path-to-regexp": {
+ "version": "0.1.7",
+ "license": "MIT"
+ },
+ "node_modules/performance-now": {
+ "version": "2.1.0",
+ "license": "MIT"
+ },
+ "node_modules/process-nextick-args": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz",
+ "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag=="
+ },
+ "node_modules/proxy-addr": {
+ "version": "2.0.7",
+ "license": "MIT",
+ "dependencies": {
+ "forwarded": "0.2.0",
+ "ipaddr.js": "1.9.1"
+ },
+ "engines": {
+ "node": ">= 0.10"
+ }
+ },
+ "node_modules/punycode": {
+ "version": "2.1.1",
+ "license": "MIT",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/qs": {
+ "version": "6.11.0",
+ "resolved": "https://registry.npmjs.org/qs/-/qs-6.11.0.tgz",
+ "integrity": "sha512-MvjoMCJwEarSbUYk5O+nmoSzSutSsTwF85zcHPQ9OrlFoZOYIjaqBAJIqIXjptyD5vThxGq52Xu/MaJzRkIk4Q==",
+ "dependencies": {
+ "side-channel": "^1.0.4"
+ },
+ "engines": {
+ "node": ">=0.6"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/querystring": {
+ "version": "0.2.0",
+ "engines": {
+ "node": ">=0.4.x"
+ }
+ },
+ "node_modules/range-parser": {
+ "version": "1.2.1",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/raw-body": {
+ "version": "2.5.2",
+ "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.2.tgz",
+ "integrity": "sha512-8zGqypfENjCIqGhgXToC8aB2r7YrBX+AQAfIPs/Mlk+BtPTztOvTS01NRW/3Eh60J+a48lt8qsCzirQ6loCVfA==",
+ "dependencies": {
+ "bytes": "3.1.2",
+ "http-errors": "2.0.0",
+ "iconv-lite": "0.4.24",
+ "unpipe": "1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/request": {
+ "version": "2.88.2",
+ "resolved": "https://registry.npmjs.org/request/-/request-2.88.2.tgz",
+ "integrity": "sha512-MsvtOrfG9ZcrOwAW+Qi+F6HbD0CWXEh9ou77uOb7FM2WPhwT7smM833PzanhJLsgXjN89Ir6V2PczXNnMpwKhw==",
+ "deprecated": "request has been deprecated, see https://github.com/request/request/issues/3142",
+ "dependencies": {
+ "aws-sign2": "~0.7.0",
+ "aws4": "^1.8.0",
+ "caseless": "~0.12.0",
+ "combined-stream": "~1.0.6",
+ "extend": "~3.0.2",
+ "forever-agent": "~0.6.1",
+ "form-data": "~2.3.2",
+ "har-validator": "~5.1.3",
+ "http-signature": "~1.2.0",
+ "is-typedarray": "~1.0.0",
+ "isstream": "~0.1.2",
+ "json-stringify-safe": "~5.0.1",
+ "mime-types": "~2.1.19",
+ "oauth-sign": "~0.9.0",
+ "performance-now": "^2.1.0",
+ "qs": "~6.5.2",
+ "safe-buffer": "^5.1.2",
+ "tough-cookie": "~2.5.0",
+ "tunnel-agent": "^0.6.0",
+ "uuid": "^3.3.2"
+ },
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/request/node_modules/qs": {
+ "version": "6.5.3",
+ "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.3.tgz",
+ "integrity": "sha512-qxXIEh4pCGfHICj1mAJQ2/2XVZkjCDTcEgfoSQxc/fYivUZxTkk7L3bDBJSoNrEzXI17oUO5Dp07ktqE5KzczA==",
+ "engines": {
+ "node": ">=0.6"
+ }
+ },
+ "node_modules/request/node_modules/uuid": {
+ "version": "3.4.0",
+ "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.4.0.tgz",
+ "integrity": "sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A==",
+ "deprecated": "Please upgrade to version 7 or higher. Older versions may use Math.random() in certain circumstances, which is known to be problematic. See https://v8.dev/blog/math-random for details.",
+ "bin": {
+ "uuid": "bin/uuid"
+ }
+ },
+ "node_modules/requestretry": {
+ "version": "7.1.0",
+ "resolved": "https://registry.npmjs.org/requestretry/-/requestretry-7.1.0.tgz",
+ "integrity": "sha512-TqVDgp251BW4b8ddQ2ptaj/57Z3LZHLscAUT7v6qs70buqF2/IoOVjYbpjJ6HiW7j5+waqegGI8xKJ/+uzgDmw==",
+ "dependencies": {
+ "extend": "^3.0.2",
+ "lodash": "^4.17.15"
+ },
+ "peerDependencies": {
+ "request": "2.*.*"
+ }
+ },
+ "node_modules/s3-stream-upload": {
+ "version": "2.0.2",
+ "engines": [
+ "node >= 0.10.2"
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "buffer-queue": "~1.0.0",
+ "readable-stream": "^2.3.0"
+ }
+ },
+ "node_modules/s3-stream-upload/node_modules/readable-stream": {
+ "version": "2.3.8",
+ "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz",
+ "integrity": "sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==",
+ "dependencies": {
+ "core-util-is": "~1.0.0",
+ "inherits": "~2.0.3",
+ "isarray": "~1.0.0",
+ "process-nextick-args": "~2.0.0",
+ "safe-buffer": "~5.1.1",
+ "string_decoder": "~1.1.1",
+ "util-deprecate": "~1.0.1"
+ }
+ },
+ "node_modules/s3-stream-upload/node_modules/safe-buffer": {
+ "version": "5.1.2",
+ "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz",
+ "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g=="
+ },
+ "node_modules/s3signed": {
+ "version": "0.1.0",
+ "license": "ISC",
+ "dependencies": {
+ "aws-sdk": "^2.0.4"
+ },
+ "bin": {
+ "s3signed": "bin/s3signed.js"
+ }
+ },
+ "node_modules/s3urls": {
+ "version": "1.5.2",
+ "license": "ISC",
+ "dependencies": {
+ "minimist": "^1.1.0",
+ "s3signed": "^0.1.0"
+ },
+ "bin": {
+ "s3urls": "bin/s3urls.js"
+ }
+ },
+ "node_modules/safe-buffer": {
+ "version": "5.2.1",
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/feross"
+ },
+ {
+ "type": "patreon",
+ "url": "https://www.patreon.com/feross"
+ },
+ {
+ "type": "consulting",
+ "url": "https://feross.org/support"
+ }
+ ],
+ "license": "MIT"
+ },
+ "node_modules/safer-buffer": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz",
+ "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg=="
+ },
+ "node_modules/sax": {
+ "version": "1.2.1",
+ "license": "ISC"
+ },
+ "node_modules/semver": {
+ "version": "5.7.1",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz",
+ "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==",
+ "bin": {
+ "semver": "bin/semver"
+ }
+ },
+ "node_modules/send": {
+ "version": "0.18.0",
+ "license": "MIT",
+ "dependencies": {
+ "debug": "2.6.9",
+ "depd": "2.0.0",
+ "destroy": "1.2.0",
+ "encodeurl": "~1.0.2",
+ "escape-html": "~1.0.3",
+ "etag": "~1.8.1",
+ "fresh": "0.5.2",
+ "http-errors": "2.0.0",
+ "mime": "1.6.0",
+ "ms": "2.1.3",
+ "on-finished": "2.4.1",
+ "range-parser": "~1.2.1",
+ "statuses": "2.0.1"
+ },
+ "engines": {
+ "node": ">= 0.8.0"
+ }
+ },
+ "node_modules/send/node_modules/ms": {
+ "version": "2.1.3",
+ "license": "MIT"
+ },
+ "node_modules/serve-static": {
+ "version": "1.15.0",
+ "license": "MIT",
+ "dependencies": {
+ "encodeurl": "~1.0.2",
+ "escape-html": "~1.0.3",
+ "parseurl": "~1.3.3",
+ "send": "0.18.0"
+ },
+ "engines": {
+ "node": ">= 0.8.0"
+ }
+ },
+ "node_modules/set-function-length": {
+ "version": "1.2.2",
+ "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz",
+ "integrity": "sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==",
+ "dependencies": {
+ "define-data-property": "^1.1.4",
+ "es-errors": "^1.3.0",
+ "function-bind": "^1.1.2",
+ "get-intrinsic": "^1.2.4",
+ "gopd": "^1.0.1",
+ "has-property-descriptors": "^1.0.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/set-function-length/node_modules/get-intrinsic": {
+ "version": "1.2.4",
+ "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz",
+ "integrity": "sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==",
+ "dependencies": {
+ "es-errors": "^1.3.0",
+ "function-bind": "^1.1.2",
+ "has-proto": "^1.0.1",
+ "has-symbols": "^1.0.3",
+ "hasown": "^2.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/setprototypeof": {
+ "version": "1.2.0",
+ "license": "ISC"
+ },
+ "node_modules/side-channel": {
+ "version": "1.0.6",
+ "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.6.tgz",
+ "integrity": "sha512-fDW/EZ6Q9RiO8eFG8Hj+7u/oW+XrPTIChwCOM2+th2A6OblDtYYIpve9m+KvI9Z4C9qSEXlaGR6bTEYHReuglA==",
+ "dependencies": {
+ "call-bind": "^1.0.7",
+ "es-errors": "^1.3.0",
+ "get-intrinsic": "^1.2.4",
+ "object-inspect": "^1.13.1"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/side-channel/node_modules/get-intrinsic": {
+ "version": "1.2.4",
+ "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz",
+ "integrity": "sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==",
+ "dependencies": {
+ "es-errors": "^1.3.0",
+ "function-bind": "^1.1.2",
+ "has-proto": "^1.0.1",
+ "has-symbols": "^1.0.3",
+ "hasown": "^2.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/socks5-client": {
+ "version": "1.2.8",
+ "license": "MIT",
+ "dependencies": {
+ "ip-address": "~6.1.0"
+ },
+ "engines": {
+ "node": ">= 6.4.0"
+ }
+ },
+ "node_modules/socks5-client/node_modules/ip-address": {
+ "version": "6.1.0",
+ "resolved": "https://registry.npmjs.org/ip-address/-/ip-address-6.1.0.tgz",
+ "integrity": "sha512-u9YYtb1p2fWSbzpKmZ/b3QXWA+diRYPxc2c4y5lFB/MMk5WZ7wNZv8S3CFcIGVJ5XtlaCAl/FQy/D3eQ2XtdOA==",
+ "dependencies": {
+ "jsbn": "1.1.0",
+ "lodash": "^4.17.15",
+ "sprintf-js": "1.1.2"
+ },
+ "engines": {
+ "node": ">= 0.10"
}
},
- "@types/node": {
- "version": "14.18.21",
- "resolved": "https://registry.npmjs.org/@types/node/-/node-14.18.21.tgz",
- "integrity": "sha512-x5W9s+8P4XteaxT/jKF0PSb7XEvo5VmqEWgsMlyeY4ZlLK8I6aH6g5TPPyDlLAep+GYf4kefb7HFyc7PAO3m+Q=="
+ "node_modules/socks5-client/node_modules/jsbn": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/jsbn/-/jsbn-1.1.0.tgz",
+ "integrity": "sha512-4bYVV3aAMtDTTu4+xsDYa6sy9GyJ69/amsu9sYF2zqjiEoZA5xJi3BrfX3uY+/IekIu7MwdObdbDWpoZdBv3/A=="
},
- "JSONStream": {
- "version": "1.3.5",
- "resolved": "https://registry.npmjs.org/JSONStream/-/JSONStream-1.3.5.tgz",
- "integrity": "sha512-E+iruNOY8VV9s4JEbe1aNEm6MiszPRr/UfcHMz0TQh1BXSxHK+ASV1R6W4HpjBhSeS+54PIsAMCBmwD06LLsqQ==",
- "requires": {
- "jsonparse": "^1.2.0",
- "through": ">=2.2.7 <3"
+ "node_modules/socks5-http-client": {
+ "version": "1.0.4",
+ "license": "MIT",
+ "dependencies": {
+ "socks5-client": "~1.2.6"
+ },
+ "engines": {
+ "node": ">= 6.4.0"
}
},
- "accepts": {
- "version": "1.3.7",
- "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.7.tgz",
- "integrity": "sha512-Il80Qs2WjYlJIBNzNkK6KYqlVMTbZLXgHx2oT0pU/fjRHyEp+PEfEPY0R3WCwAGVOtauxh1hOxNgIf5bv7dQpA==",
- "requires": {
- "mime-types": "~2.1.24",
- "negotiator": "0.6.2"
+ "node_modules/socks5-https-client": {
+ "version": "1.2.1",
+ "license": "MIT",
+ "dependencies": {
+ "socks5-client": "~1.2.3"
+ },
+ "engines": {
+ "node": ">= 6.4.0"
}
},
- "ajv": {
- "version": "6.12.6",
- "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz",
- "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==",
+ "node_modules/sprintf-js": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.1.2.tgz",
+ "integrity": "sha512-VE0SOVEHCk7Qc8ulkWw3ntAzXuqf7S2lvwQaDLRnUeIEaKNQJzV6BwmLKhOqT61aGhfUMrXeaBk+oDGCzvhcug=="
+ },
+ "node_modules/statuses": {
+ "version": "2.0.1",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/string_decoder": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz",
+ "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==",
+ "dependencies": {
+ "safe-buffer": "~5.1.0"
+ }
+ },
+ "node_modules/string_decoder/node_modules/safe-buffer": {
+ "version": "5.1.2",
+ "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz",
+ "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g=="
+ },
+ "node_modules/through": {
+ "version": "2.3.8",
+ "license": "MIT"
+ },
+ "node_modules/toidentifier": {
+ "version": "1.0.1",
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.6"
+ }
+ },
+ "node_modules/tough-cookie": {
+ "version": "2.5.0",
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "psl": "^1.1.28",
+ "punycode": "^2.1.1"
+ },
+ "engines": {
+ "node": ">=0.8"
+ }
+ },
+ "node_modules/tough-cookie/node_modules/psl": {
+ "version": "1.9.0",
+ "resolved": "https://registry.npmjs.org/psl/-/psl-1.9.0.tgz",
+ "integrity": "sha512-E/ZsdU4HLs/68gYzgGTkMicWTLPdAftJLfJFlLUAAKZGkStNU72sZjT66SnMDVOfOWY/YAoiD7Jxa9iHvngcag=="
+ },
+ "node_modules/tunnel-agent": {
+ "version": "0.6.0",
+ "license": "Apache-2.0",
+ "dependencies": {
+ "safe-buffer": "^5.0.1"
+ },
+ "engines": {
+ "node": "*"
+ }
+ },
+ "node_modules/tweetnacl": {
+ "version": "0.14.5",
+ "resolved": "https://registry.npmjs.org/tweetnacl/-/tweetnacl-0.14.5.tgz",
+ "integrity": "sha512-KXXFFdAbFXY4geFIwoyNK+f5Z1b7swfXABfL7HXCmoIWMKU3dmS26672A4EeQtDzLKy7SXmfBu51JolvEKwtGA=="
+ },
+ "node_modules/type-is": {
+ "version": "1.6.18",
+ "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz",
+ "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==",
+ "dependencies": {
+ "media-typer": "0.3.0",
+ "mime-types": "~2.1.24"
+ },
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/unpipe": {
+ "version": "1.0.0",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/uri-js": {
+ "version": "4.4.1",
+ "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz",
+ "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==",
+ "dependencies": {
+ "punycode": "^2.1.0"
+ }
+ },
+ "node_modules/url": {
+ "version": "0.10.3",
+ "license": "MIT",
+ "dependencies": {
+ "punycode": "1.3.2",
+ "querystring": "0.2.0"
+ }
+ },
+ "node_modules/url/node_modules/punycode": {
+ "version": "1.3.2",
+ "license": "MIT"
+ },
+ "node_modules/util": {
+ "version": "0.12.5",
+ "resolved": "https://registry.npmjs.org/util/-/util-0.12.5.tgz",
+ "integrity": "sha512-kZf/K6hEIrWHI6XqOFUiiMa+79wE/D8Q+NCNAWclkyg3b4d2k7s0QGepNjiABc+aR3N1PAyHL7p6UcLY6LmrnA==",
+ "dependencies": {
+ "inherits": "^2.0.3",
+ "is-arguments": "^1.0.4",
+ "is-generator-function": "^1.0.7",
+ "is-typed-array": "^1.1.3",
+ "which-typed-array": "^1.1.2"
+ }
+ },
+ "node_modules/util-deprecate": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz",
+ "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw=="
+ },
+ "node_modules/util/node_modules/available-typed-arrays": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/available-typed-arrays/-/available-typed-arrays-1.0.5.tgz",
+ "integrity": "sha512-DMD0KiN46eipeziST1LPP/STfDU0sufISXmjSgvVsoU2tqxctQeASejWcfNtxYKqETM1UxQ8sp2OrSBWpHY6sw==",
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/util/node_modules/has-tostringtag": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.0.tgz",
+ "integrity": "sha512-kFjcSNhnlGV1kyoGk7OXKSawH5JOb/LzUc5w9B02hOTO0dfFRjbHQKvg1d6cf3HbeUmtU9VbbV3qzZ2Teh97WQ==",
+ "dependencies": {
+ "has-symbols": "^1.0.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/util/node_modules/is-arguments": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/is-arguments/-/is-arguments-1.1.1.tgz",
+ "integrity": "sha512-8Q7EARjzEnKpt/PCD7e1cgUS0a6X8u5tdSiMqXhojOdoV9TsMsiO+9VLC5vAmO8N7/GmXn7yjR8qnA6bVAEzfA==",
+ "dependencies": {
+ "call-bind": "^1.0.2",
+ "has-tostringtag": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/util/node_modules/is-generator-function": {
+ "version": "1.0.10",
+ "resolved": "https://registry.npmjs.org/is-generator-function/-/is-generator-function-1.0.10.tgz",
+ "integrity": "sha512-jsEjy9l3yiXEQ+PsXdmBwEPcOxaXWLspKdplFUVI9vq1iZgIekeC0L167qeu86czQaxed3q/Uzuw0swL0irL8A==",
+ "dependencies": {
+ "has-tostringtag": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/util/node_modules/is-typed-array": {
+ "version": "1.1.10",
+ "resolved": "https://registry.npmjs.org/is-typed-array/-/is-typed-array-1.1.10.tgz",
+ "integrity": "sha512-PJqgEHiWZvMpaFZ3uTc8kHPM4+4ADTlDniuQL7cU/UDA0Ql7F70yGfHph3cLNe+c9toaigv+DFzTJKhc2CtO6A==",
+ "dependencies": {
+ "available-typed-arrays": "^1.0.5",
+ "call-bind": "^1.0.2",
+ "for-each": "^0.3.3",
+ "gopd": "^1.0.1",
+ "has-tostringtag": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/util/node_modules/which-typed-array": {
+ "version": "1.1.9",
+ "resolved": "https://registry.npmjs.org/which-typed-array/-/which-typed-array-1.1.9.tgz",
+ "integrity": "sha512-w9c4xkx6mPidwp7180ckYWfMmvxpjlZuIudNtDf4N/tTAUB8VJbX25qZoAsrtGuYNnGw3pa0AXgbGKRB8/EceA==",
+ "dependencies": {
+ "available-typed-arrays": "^1.0.5",
+ "call-bind": "^1.0.2",
+ "for-each": "^0.3.3",
+ "gopd": "^1.0.1",
+ "has-tostringtag": "^1.0.0",
+ "is-typed-array": "^1.1.10"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/utils-merge": {
+ "version": "1.0.1",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.4.0"
+ }
+ },
+ "node_modules/uuid": {
+ "version": "8.0.0",
+ "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.0.0.tgz",
+ "integrity": "sha512-jOXGuXZAWdsTH7eZLtyXMqUb9EcWMGZNbL9YcGBJl4MH4nrxHmZJhEHvyLFrkxo+28uLb/NYRcStH48fnD0Vzw==",
+ "bin": {
+ "uuid": "dist/bin/uuid"
+ }
+ },
+ "node_modules/vary": {
+ "version": "1.1.2",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/verror": {
+ "version": "1.10.0",
+ "engines": [
+ "node >=0.6.0"
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "assert-plus": "^1.0.0",
+ "core-util-is": "1.0.2",
+ "extsprintf": "^1.2.0"
+ }
+ },
+ "node_modules/verror/node_modules/core-util-is": {
+ "version": "1.0.2",
+ "license": "MIT"
+ },
+ "node_modules/xml2js": {
+ "version": "0.4.19",
+ "license": "MIT",
+ "dependencies": {
+ "sax": ">=0.6.0",
+ "xmlbuilder": "~9.0.1"
+ }
+ },
+ "node_modules/xmlbuilder": {
+ "version": "9.0.7",
+ "license": "MIT",
+ "engines": {
+ "node": ">=4.0"
+ }
+ }
+ },
+ "dependencies": {
+ "accepts": {
+ "version": "1.3.8",
"requires": {
- "fast-deep-equal": "^3.1.1",
- "fast-json-stable-stringify": "^2.0.0",
- "json-schema-traverse": "^0.4.1",
- "uri-js": "^4.2.2"
+ "mime-types": "~2.1.34",
+ "negotiator": "0.6.3"
+ },
+ "dependencies": {
+ "negotiator": {
+ "version": "0.6.3",
+ "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz",
+ "integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg=="
+ }
}
},
"ansi-regex": {
- "version": "6.0.1",
- "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.0.1.tgz",
- "integrity": "sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA=="
+ "version": "6.0.1"
},
"array-flatten": {
- "version": "1.1.1",
- "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz",
- "integrity": "sha1-ml9pkFGx5wczKPKgCJaLZOopVdI="
+ "version": "1.1.1"
},
"asn1": {
"version": "0.2.6",
@@ -84,9 +1832,7 @@
}
},
"assert-plus": {
- "version": "1.0.0",
- "resolved": "https://registry.npmjs.org/assert-plus/-/assert-plus-1.0.0.tgz",
- "integrity": "sha512-NfJ4UzBCcQGLDlQq7nHxH+tv3kyZ0hHQqF5BO6J7tNJeP5do1llPr8dZ8zHonfhAu0PHAdMkSo+8o0wxg9lZWw=="
+ "version": "1.0.0"
},
"async": {
"version": "3.2.4",
@@ -94,14 +1840,12 @@
"integrity": "sha512-iAB+JbDEGXhyIUavoDl9WP/Jj106Kz9DEn1DPgYw5ruDn0e3Wgi3sKFm55sASdGBNOQB8F59d9qQ7deqrHA8wQ=="
},
"asynckit": {
- "version": "0.4.0",
- "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz",
- "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q=="
+ "version": "0.4.0"
},
"aws-sdk": {
- "version": "2.1154.0",
- "resolved": "https://registry.npmjs.org/aws-sdk/-/aws-sdk-2.1154.0.tgz",
- "integrity": "sha512-SIxLcWGsnW9Sl2P+a+uoqebBsfjeAZZOQokzgDj3VoESnFzsjI+2REi9CdvvSvwlfFUP7sFr6A0khrYNDJLebQ==",
+ "version": "2.1273.0",
+ "resolved": "https://registry.npmjs.org/aws-sdk/-/aws-sdk-2.1273.0.tgz",
+ "integrity": "sha512-QF37fm1DfUxjw+IJtDMTDBckVwAOf8EHQjs4NxJp5TtRkeqtWkxNzq/ViI8kAS+0n8JZaom8Oenmy8ufGfLMAQ==",
"requires": {
"buffer": "4.9.2",
"events": "1.1.1",
@@ -110,36 +1854,22 @@
"querystring": "0.2.0",
"sax": "1.2.1",
"url": "0.10.3",
+ "util": "^0.12.4",
"uuid": "8.0.0",
"xml2js": "0.4.19"
- },
- "dependencies": {
- "uuid": {
- "version": "8.0.0",
- "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.0.0.tgz",
- "integrity": "sha512-jOXGuXZAWdsTH7eZLtyXMqUb9EcWMGZNbL9YcGBJl4MH4nrxHmZJhEHvyLFrkxo+28uLb/NYRcStH48fnD0Vzw=="
- }
}
},
"aws-sign2": {
- "version": "0.7.0",
- "resolved": "https://registry.npmjs.org/aws-sign2/-/aws-sign2-0.7.0.tgz",
- "integrity": "sha512-08kcGqnYf/YmjoRhfxyu+CLxBjUtHLXLXX/vUfx9l2LYzG3c1m61nrpyFUZI6zeS+Li/wWMMidD9KgrqtGq3mA=="
+ "version": "0.7.0"
},
"aws4": {
- "version": "1.11.0",
- "resolved": "https://registry.npmjs.org/aws4/-/aws4-1.11.0.tgz",
- "integrity": "sha512-xh1Rl34h6Fi1DC2WWKfxUTVqRsNnr6LsKz2+hfwDxQJWmrx8+c7ylaqBMcHfl1U1r2dsifOvKX3LQuLNZ+XSvA=="
+ "version": "1.11.0"
},
"balanced-match": {
- "version": "1.0.2",
- "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz",
- "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw=="
+ "version": "1.0.2"
},
"base64-js": {
- "version": "1.5.1",
- "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz",
- "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA=="
+ "version": "1.5.1"
},
"bcrypt-pbkdf": {
"version": "1.0.2",
@@ -150,38 +1880,29 @@
}
},
"big.js": {
- "version": "5.2.2",
- "resolved": "https://registry.npmjs.org/big.js/-/big.js-5.2.2.tgz",
- "integrity": "sha512-vyL2OymJxmarO8gxMr0mhChsO9QGwhynfuu4+MHTAW6czfq9humCB7rKpUjDd9YUiDPU4mzpyupFSvOClAwbmQ=="
+ "version": "5.2.2"
},
"body-parser": {
- "version": "1.19.0",
- "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.19.0.tgz",
- "integrity": "sha512-dhEPs72UPbDnAQJ9ZKMNTP6ptJaionhP5cBb541nXPlW60Jepo9RV/a4fX4XWW9CuFNK22krhrj1+rgzifNCsw==",
+ "version": "1.20.2",
+ "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.2.tgz",
+ "integrity": "sha512-ml9pReCu3M61kGlqoTm2umSXTlRTuGTx0bfYj+uIUKKYycG5NtSbeetV3faSU6R7ajOPw0g/J1PvK4qNy7s5bA==",
"requires": {
- "bytes": "3.1.0",
- "content-type": "~1.0.4",
+ "bytes": "3.1.2",
+ "content-type": "~1.0.5",
"debug": "2.6.9",
- "depd": "~1.1.2",
- "http-errors": "1.7.2",
+ "depd": "2.0.0",
+ "destroy": "1.2.0",
+ "http-errors": "2.0.0",
"iconv-lite": "0.4.24",
- "on-finished": "~2.3.0",
- "qs": "6.7.0",
- "raw-body": "2.4.0",
- "type-is": "~1.6.17"
- },
- "dependencies": {
- "qs": {
- "version": "6.7.0",
- "resolved": "https://registry.npmjs.org/qs/-/qs-6.7.0.tgz",
- "integrity": "sha512-VCdBRNFTX1fyE7Nb6FYoURo/SPe62QCaAyzJvUjwRaIsc+NePBEniHlvxFmmX56+HZphIGtV0XeCirBtpDrTyQ=="
- }
+ "on-finished": "2.4.1",
+ "qs": "6.11.0",
+ "raw-body": "2.5.2",
+ "type-is": "~1.6.18",
+ "unpipe": "1.0.0"
}
},
"brace-expansion": {
"version": "1.1.11",
- "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz",
- "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==",
"requires": {
"balanced-match": "^1.0.0",
"concat-map": "0.0.1"
@@ -198,67 +1919,72 @@
}
},
"buffer-queue": {
- "version": "1.0.0",
- "resolved": "https://registry.npmjs.org/buffer-queue/-/buffer-queue-1.0.0.tgz",
- "integrity": "sha512-HNAysvwrmORbm5w5rB6yCz2Sab+ATCW6RSAOVWJmaRnPviPfuNO8+f3R0MyCJMUhL8sMx88LcawtIcfjHERhVA=="
+ "version": "1.0.0"
},
"bytes": {
- "version": "3.1.0",
- "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.0.tgz",
- "integrity": "sha512-zauLjrfCG+xvoyaqLoV8bLVXXNGC4JqlxFCutSDWA6fJrTo2ZuvLYTqZ7aHBLZSMOopbzwv8f+wZcVzfVTI2Dg=="
+ "version": "3.1.2"
+ },
+ "call-bind": {
+ "version": "1.0.7",
+ "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.7.tgz",
+ "integrity": "sha512-GHTSNSYICQ7scH7sZ+M2rFopRoLh8t2bLSW6BbgrtLsahOIB5iyAVJf9GjWK3cYTDaMj4XdBpM1cA6pIS0Kv2w==",
+ "requires": {
+ "es-define-property": "^1.0.0",
+ "es-errors": "^1.3.0",
+ "function-bind": "^1.1.2",
+ "get-intrinsic": "^1.2.4",
+ "set-function-length": "^1.2.1"
+ },
+ "dependencies": {
+ "get-intrinsic": {
+ "version": "1.2.4",
+ "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz",
+ "integrity": "sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==",
+ "requires": {
+ "es-errors": "^1.3.0",
+ "function-bind": "^1.1.2",
+ "has-proto": "^1.0.1",
+ "has-symbols": "^1.0.3",
+ "hasown": "^2.0.0"
+ }
+ }
+ }
},
"caseless": {
- "version": "0.12.0",
- "resolved": "https://registry.npmjs.org/caseless/-/caseless-0.12.0.tgz",
- "integrity": "sha512-4tYFyifaFfGacoiObjJegolkwSU4xQNGbVgUiNYVUxbQ2x2lUsFvY4hVgVzGiIe6WLOPqycWXA40l+PWsxthUw=="
+ "version": "0.12.0"
},
"combined-stream": {
"version": "1.0.8",
- "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz",
- "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==",
"requires": {
"delayed-stream": "~1.0.0"
}
},
"concat-map": {
- "version": "0.0.1",
- "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz",
- "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg=="
+ "version": "0.0.1"
},
"content-disposition": {
- "version": "0.5.3",
- "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.3.tgz",
- "integrity": "sha512-ExO0774ikEObIAEV9kDo50o+79VCUdEB6n6lzKgGwupcVeRlhrj3qGAfwq8G6uBJjkqLrhT0qEYFcWng8z1z0g==",
+ "version": "0.5.4",
"requires": {
- "safe-buffer": "5.1.2"
- },
- "dependencies": {
- "safe-buffer": {
- "version": "5.1.2",
- "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz",
- "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g=="
- }
+ "safe-buffer": "5.2.1"
}
},
"content-type": {
- "version": "1.0.4",
- "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.4.tgz",
- "integrity": "sha512-hIP3EEPs8tB9AT1L+NUqtwOAps4mk2Zob89MWXMHjHWg9milF/j4osnnQLXBCBFBk/tvIG/tUc9mOUJiPBhPXA=="
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz",
+ "integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA=="
},
"cookie": {
- "version": "0.4.0",
- "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.4.0.tgz",
- "integrity": "sha512-+Hp8fLp57wnUSt0tY0tHEXh4voZRDnoIrZPqlo3DPiI4y9lwg/jqx+1Om94/W6ZaPDOUbnjOt/99w66zk+l1Xg=="
+ "version": "0.6.0",
+ "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.6.0.tgz",
+ "integrity": "sha512-U71cyTamuh1CRNCfpGY6to28lxvNwPG4Guz/EVjgf3Jmzv0vlDp1atT9eS5dDjMYHucpHbWns6Lwf3BKz6svdw=="
},
"cookie-signature": {
- "version": "1.0.6",
- "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz",
- "integrity": "sha1-4wOogrNCzD7oylE6eZmXNNqzriw="
+ "version": "1.0.6"
},
"core-util-is": {
- "version": "1.0.2",
- "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz",
- "integrity": "sha512-3lqz5YjWTYnW6dlDa5TLaTCcShfar1e40rmcJVwCBJC6mWlFuj0eCHIElmG1g5kyuJ/GD+8Wn4FFCcz4gJPfaQ=="
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.3.tgz",
+ "integrity": "sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ=="
},
"dashdash": {
"version": "1.14.1",
@@ -270,31 +1996,31 @@
},
"debug": {
"version": "2.6.9",
- "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
- "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
"requires": {
"ms": "2.0.0"
}
},
+ "define-data-property": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz",
+ "integrity": "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==",
+ "requires": {
+ "es-define-property": "^1.0.0",
+ "es-errors": "^1.3.0",
+ "gopd": "^1.0.1"
+ }
+ },
"delay": {
- "version": "5.0.0",
- "resolved": "https://registry.npmjs.org/delay/-/delay-5.0.0.tgz",
- "integrity": "sha512-ReEBKkIfe4ya47wlPYf/gu5ib6yUG0/Aez0JQZQz94kiWtRQvZIQbTiehsnwHvLSWJnQdhVeqYue7Id1dKr0qw=="
+ "version": "5.0.0"
},
"delayed-stream": {
- "version": "1.0.0",
- "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz",
- "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ=="
+ "version": "1.0.0"
},
"depd": {
- "version": "1.1.2",
- "resolved": "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz",
- "integrity": "sha1-m81S4UwJd2PnSbJ0xDRu0uVgtak="
+ "version": "2.0.0"
},
"destroy": {
- "version": "1.0.4",
- "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.0.4.tgz",
- "integrity": "sha1-l4hXRCxEdJ5CBmE+N5RiBYJqvYA="
+ "version": "1.2.0"
},
"ecc-jsbn": {
"version": "0.1.2",
@@ -306,16 +2032,13 @@
}
},
"ee-first": {
- "version": "1.1.1",
- "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz",
- "integrity": "sha1-WQxhFWsK4vTwJVcyoViyZrxWsh0="
+ "version": "1.1.1"
},
"elasticdump": {
- "version": "6.84.1",
- "resolved": "https://registry.npmjs.org/elasticdump/-/elasticdump-6.84.1.tgz",
- "integrity": "sha512-qgHJeGGNMJFwGMpidCOCKZsbq6bUth2cvns1QdrJnCIoojv5x0J4C6Xm5zh8sZCYr7y5nrwfgMUkrbMNLHdGwQ==",
+ "version": "6.94.1",
+ "resolved": "https://registry.npmjs.org/elasticdump/-/elasticdump-6.94.1.tgz",
+ "integrity": "sha512-VThINQBW1MG7k7oVGndPBXCL6cFSfByu2EZo0gch9l7voyv1FfxyrIp9cZ5Ft9Vwygjh7sXSomnWaQ+qzmkfKA==",
"requires": {
- "JSONStream": "^1.3.5",
"async": "^2.6.4",
"aws-sdk": "2.1122.0",
"aws4": "^1.11.0",
@@ -326,6 +2049,7 @@
"fast-csv": "4.3.6",
"http-status": "^1.5.1",
"ini": "^2.0.0",
+ "JSONStream": "^1.3.5",
"lodash": "^4.17.21",
"lossless-json": "^1.0.5",
"minimist": "^1.2.6",
@@ -363,104 +2087,101 @@
"xml2js": "0.4.19"
}
},
- "bytes": {
- "version": "3.1.2",
- "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz",
- "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg=="
+ "uuid": {
+ "version": "3.3.2",
+ "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.3.2.tgz",
+ "integrity": "sha512-yXJmeNaw3DnnKAOKJE51sL/ZaYfWJRl1pK9dr19YFCu0ObS231AB1/LbqTKRAQ5kw8A90rA6fr4riOUpTZvQZA=="
}
}
},
"encodeurl": {
- "version": "1.0.2",
- "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz",
- "integrity": "sha1-rT/0yG7C0CkyL1oCw6mmBslbP1k="
+ "version": "1.0.2"
},
- "escape-html": {
- "version": "1.0.3",
- "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz",
- "integrity": "sha1-Aljq5NPQwJdN4cFpGI7wBR0dGYg="
+ "es-define-property": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.0.tgz",
+ "integrity": "sha512-jxayLKShrEqqzJ0eumQbVhTYQM27CfT1T35+gCgDFoL82JLsXqTJ76zv6A0YLOgEnLUMvLzsDsGIrl8NFpT2gQ==",
+ "requires": {
+ "get-intrinsic": "^1.2.4"
+ },
+ "dependencies": {
+ "get-intrinsic": {
+ "version": "1.2.4",
+ "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz",
+ "integrity": "sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==",
+ "requires": {
+ "es-errors": "^1.3.0",
+ "function-bind": "^1.1.2",
+ "has-proto": "^1.0.1",
+ "has-symbols": "^1.0.3",
+ "hasown": "^2.0.0"
+ }
+ }
+ }
},
- "etag": {
- "version": "1.8.1",
- "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz",
- "integrity": "sha1-Qa4u62XvpiJorr/qg6x9eSmbCIc="
+ "es-errors": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz",
+ "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw=="
},
- "eventemitter3": {
- "version": "4.0.7",
- "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.7.tgz",
- "integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw=="
+ "escape-html": {
+ "version": "1.0.3"
+ },
+ "etag": {
+ "version": "1.8.1"
},
"events": {
- "version": "1.1.1",
- "resolved": "https://registry.npmjs.org/events/-/events-1.1.1.tgz",
- "integrity": "sha512-kEcvvCBByWXGnZy6JUlgAp2gBIUjfCAV6P6TgT1/aaQKcmuAEC4OZTV1I4EWQLz2gxZw76atuVyvHhTxvi0Flw=="
+ "version": "1.1.1"
},
"express": {
- "version": "4.17.1",
- "resolved": "https://registry.npmjs.org/express/-/express-4.17.1.tgz",
- "integrity": "sha512-mHJ9O79RqluphRrcw2X/GTh3k9tVv8YcoyY4Kkh4WDMUYKRZUq0h1o0w2rrrxBqM7VoeUVqgb27xlEMXTnYt4g==",
+ "version": "4.19.2",
+ "resolved": "https://registry.npmjs.org/express/-/express-4.19.2.tgz",
+ "integrity": "sha512-5T6nhjsT+EOMzuck8JjBHARTHfMht0POzlA60WV2pMD3gyXw2LZnZ+ueGdNxG+0calOJcWKbpFcuzLZ91YWq9Q==",
"requires": {
- "accepts": "~1.3.7",
+ "accepts": "~1.3.8",
"array-flatten": "1.1.1",
- "body-parser": "1.19.0",
- "content-disposition": "0.5.3",
+ "body-parser": "1.20.2",
+ "content-disposition": "0.5.4",
"content-type": "~1.0.4",
- "cookie": "0.4.0",
+ "cookie": "0.6.0",
"cookie-signature": "1.0.6",
"debug": "2.6.9",
- "depd": "~1.1.2",
+ "depd": "2.0.0",
"encodeurl": "~1.0.2",
"escape-html": "~1.0.3",
"etag": "~1.8.1",
- "finalhandler": "~1.1.2",
+ "finalhandler": "1.2.0",
"fresh": "0.5.2",
+ "http-errors": "2.0.0",
"merge-descriptors": "1.0.1",
"methods": "~1.1.2",
- "on-finished": "~2.3.0",
+ "on-finished": "2.4.1",
"parseurl": "~1.3.3",
"path-to-regexp": "0.1.7",
- "proxy-addr": "~2.0.5",
- "qs": "6.7.0",
+ "proxy-addr": "~2.0.7",
+ "qs": "6.11.0",
"range-parser": "~1.2.1",
- "safe-buffer": "5.1.2",
- "send": "0.17.1",
- "serve-static": "1.14.1",
- "setprototypeof": "1.1.1",
- "statuses": "~1.5.0",
+ "safe-buffer": "5.2.1",
+ "send": "0.18.0",
+ "serve-static": "1.15.0",
+ "setprototypeof": "1.2.0",
+ "statuses": "2.0.1",
"type-is": "~1.6.18",
"utils-merge": "1.0.1",
"vary": "~1.1.2"
- },
- "dependencies": {
- "qs": {
- "version": "6.7.0",
- "resolved": "https://registry.npmjs.org/qs/-/qs-6.7.0.tgz",
- "integrity": "sha512-VCdBRNFTX1fyE7Nb6FYoURo/SPe62QCaAyzJvUjwRaIsc+NePBEniHlvxFmmX56+HZphIGtV0XeCirBtpDrTyQ=="
- },
- "safe-buffer": {
- "version": "5.1.2",
- "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz",
- "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g=="
- }
}
},
"extend": {
- "version": "3.0.2",
- "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz",
- "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g=="
+ "version": "3.0.2"
},
"extends-classes": {
"version": "1.0.5",
- "resolved": "https://registry.npmjs.org/extends-classes/-/extends-classes-1.0.5.tgz",
- "integrity": "sha512-ccyBHFN+wFM0dz0hvuQntSH9KST9951ua1hr3yxeFfu+h3H/eHw1RavE8XAEVi9K8dh534Mk3xA+pjk7VHkUcQ==",
"requires": {
"method-missing": "^1.1.2"
}
},
"extsprintf": {
- "version": "1.3.0",
- "resolved": "https://registry.npmjs.org/extsprintf/-/extsprintf-1.3.0.tgz",
- "integrity": "sha512-11Ndz7Nv+mvAC1j0ktTa7fAb0vLyGGX+rMHNBYQviQDGU0Hw7lhctJANqbPhu9nV9/izT/IntTgZ7Im/9LJs9g=="
+ "version": "1.3.0"
},
"fast-csv": {
"version": "4.3.6",
@@ -469,6 +2190,40 @@
"requires": {
"@fast-csv/format": "4.3.5",
"@fast-csv/parse": "4.3.6"
+ },
+ "dependencies": {
+ "@fast-csv/format": {
+ "version": "4.3.5",
+ "resolved": "https://registry.npmjs.org/@fast-csv/format/-/format-4.3.5.tgz",
+ "integrity": "sha512-8iRn6QF3I8Ak78lNAa+Gdl5MJJBM5vRHivFtMRUWINdevNo00K7OXxS2PshawLKTejVwieIlPmK5YlLu6w4u8A==",
+ "requires": {
+ "@types/node": "^14.0.1",
+ "lodash.escaperegexp": "^4.1.2",
+ "lodash.isboolean": "^3.0.3",
+ "lodash.isequal": "^4.5.0",
+ "lodash.isfunction": "^3.0.9",
+ "lodash.isnil": "^4.0.0"
+ }
+ },
+ "@fast-csv/parse": {
+ "version": "4.3.6",
+ "resolved": "https://registry.npmjs.org/@fast-csv/parse/-/parse-4.3.6.tgz",
+ "integrity": "sha512-uRsLYksqpbDmWaSmzvJcuApSEe38+6NQZBUsuAyMZKqHxH0g1wcJgsKUvN3WC8tewaqFjBMMGrkHmC+T7k8LvA==",
+ "requires": {
+ "@types/node": "^14.0.1",
+ "lodash.escaperegexp": "^4.1.2",
+ "lodash.groupby": "^4.6.0",
+ "lodash.isfunction": "^3.0.9",
+ "lodash.isnil": "^4.0.0",
+ "lodash.isundefined": "^3.0.1",
+ "lodash.uniq": "^4.5.0"
+ }
+ },
+ "@types/node": {
+ "version": "14.18.34",
+ "resolved": "https://registry.npmjs.org/@types/node/-/node-14.18.34.tgz",
+ "integrity": "sha512-hcU9AIQVHmPnmjRK+XUUYlILlr9pQrsqSrwov/JK1pnf3GTQowVBhx54FbvM0AU/VXGH4i3+vgXS5EguR7fysA=="
+ }
}
},
"fast-deep-equal": {
@@ -482,28 +2237,35 @@
"integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw=="
},
"finalhandler": {
- "version": "1.1.2",
- "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.1.2.tgz",
- "integrity": "sha512-aAWcW57uxVNrQZqFXjITpW3sIUQmHGG3qSb9mUah9MgMC4NeWhNOlNjXEYq3HjRAvL6arUviZGGJsBg6z0zsWA==",
+ "version": "1.2.0",
"requires": {
"debug": "2.6.9",
"encodeurl": "~1.0.2",
"escape-html": "~1.0.3",
- "on-finished": "~2.3.0",
+ "on-finished": "2.4.1",
"parseurl": "~1.3.3",
- "statuses": "~1.5.0",
+ "statuses": "2.0.1",
"unpipe": "~1.0.0"
}
},
+ "for-each": {
+ "version": "0.3.3",
+ "requires": {
+ "is-callable": "^1.1.3"
+ },
+ "dependencies": {
+ "is-callable": {
+ "version": "1.2.7",
+ "resolved": "https://registry.npmjs.org/is-callable/-/is-callable-1.2.7.tgz",
+ "integrity": "sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA=="
+ }
+ }
+ },
"forever-agent": {
- "version": "0.6.1",
- "resolved": "https://registry.npmjs.org/forever-agent/-/forever-agent-0.6.1.tgz",
- "integrity": "sha512-j0KLYPhm6zeac4lz3oJ3o65qvgQCcPubiyotZrXqEaG4hNagNYO8qdlUrX5vwqv9ohqeT/Z3j6+yW067yWWdUw=="
+ "version": "0.6.1"
},
"form-data": {
"version": "2.3.3",
- "resolved": "https://registry.npmjs.org/form-data/-/form-data-2.3.3.tgz",
- "integrity": "sha512-1lLKB2Mu3aGP1Q/2eCOx0fNbRMe7XdwktwOruhfqqd0rIJWwN4Dh+E3hrPSlDCXnSR7UtZ1N38rVXm+6+MEhJQ==",
"requires": {
"asynckit": "^0.4.0",
"combined-stream": "^1.0.6",
@@ -511,14 +2273,15 @@
}
},
"forwarded": {
- "version": "0.1.2",
- "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.1.2.tgz",
- "integrity": "sha1-mMI9qxF1ZXuMBXPozszZGw/xjIQ="
+ "version": "0.2.0"
},
"fresh": {
- "version": "0.5.2",
- "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz",
- "integrity": "sha1-PYyt2Q2XZWn6g1qx+OSyOhBWBac="
+ "version": "0.5.2"
+ },
+ "function-bind": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz",
+ "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA=="
},
"getpass": {
"version": "0.1.7",
@@ -528,53 +2291,115 @@
"assert-plus": "^1.0.0"
}
},
+ "gopd": {
+ "version": "1.0.1",
+ "requires": {
+ "get-intrinsic": "^1.1.3"
+ },
+ "dependencies": {
+ "get-intrinsic": {
+ "version": "1.2.4",
+ "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz",
+ "integrity": "sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==",
+ "requires": {
+ "es-errors": "^1.3.0",
+ "function-bind": "^1.1.2",
+ "has-proto": "^1.0.1",
+ "has-symbols": "^1.0.3",
+ "hasown": "^2.0.0"
+ }
+ }
+ }
+ },
"har-schema": {
- "version": "2.0.0",
- "resolved": "https://registry.npmjs.org/har-schema/-/har-schema-2.0.0.tgz",
- "integrity": "sha512-Oqluz6zhGX8cyRaTQlFMPw80bSJVG2x/cFb8ZPhUILGgHka9SsokCCOQgpveePerqidZOrT14ipqfJb7ILcW5Q=="
+ "version": "2.0.0"
},
"har-validator": {
"version": "5.1.5",
- "resolved": "https://registry.npmjs.org/har-validator/-/har-validator-5.1.5.tgz",
- "integrity": "sha512-nmT2T0lljbxdQZfspsno9hgrG3Uir6Ks5afism62poxqBM6sDnMEuPmzTq8XN0OEwqKLLdh1jQI3qyE66Nzb3w==",
"requires": {
"ajv": "^6.12.3",
"har-schema": "^2.0.0"
+ },
+ "dependencies": {
+ "ajv": {
+ "version": "6.12.6",
+ "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz",
+ "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==",
+ "requires": {
+ "fast-deep-equal": "^3.1.1",
+ "fast-json-stable-stringify": "^2.0.0",
+ "json-schema-traverse": "^0.4.1",
+ "uri-js": "^4.2.2"
+ }
+ }
+ }
+ },
+ "has-property-descriptors": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz",
+ "integrity": "sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==",
+ "requires": {
+ "es-define-property": "^1.0.0"
+ }
+ },
+ "has-proto": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.0.3.tgz",
+ "integrity": "sha512-SJ1amZAJUiZS+PhsVLf5tGydlaVB8EdFpaSO4gmiUKUOxk8qzn5AIy4ZeJUmh22znIdk/uMAUT2pl3FxzVUH+Q=="
+ },
+ "has-symbols": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz",
+ "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A=="
+ },
+ "hasown": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz",
+ "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==",
+ "requires": {
+ "function-bind": "^1.1.2"
}
},
"http-errors": {
- "version": "1.7.2",
- "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-1.7.2.tgz",
- "integrity": "sha512-uUQBt3H/cSIVfch6i1EuPNy/YsRSOUBXTVfZ+yR7Zjez3qjBz6i9+i4zjNaoqcoFVI4lQJ5plg63TvGfRSDCRg==",
+ "version": "2.0.0",
"requires": {
- "depd": "~1.1.2",
- "inherits": "2.0.3",
- "setprototypeof": "1.1.1",
- "statuses": ">= 1.5.0 < 2",
- "toidentifier": "1.0.0"
- },
- "dependencies": {
- "inherits": {
- "version": "2.0.3",
- "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz",
- "integrity": "sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4="
- }
+ "depd": "2.0.0",
+ "inherits": "2.0.4",
+ "setprototypeof": "1.2.0",
+ "statuses": "2.0.1",
+ "toidentifier": "1.0.1"
}
},
"http-signature": {
"version": "1.2.0",
- "resolved": "https://registry.npmjs.org/http-signature/-/http-signature-1.2.0.tgz",
- "integrity": "sha512-CAbnr6Rz4CYQkLYUtSNXxQPUH2gK8f3iWexVlsnMeD+GjlsQ0Xsy1cOX+mN3dtxYomRy21CiOzU8Uhw6OwncEQ==",
"requires": {
"assert-plus": "^1.0.0",
"jsprim": "^1.2.2",
"sshpk": "^1.7.0"
+ },
+ "dependencies": {
+ "sshpk": {
+ "version": "1.18.0",
+ "resolved": "https://registry.npmjs.org/sshpk/-/sshpk-1.18.0.tgz",
+ "integrity": "sha512-2p2KJZTSqQ/I3+HX42EpYOa2l3f8Erv8MWKsy2I9uf4wA7yFIkXRffYdsx86y6z4vHtV8u7g+pPlr8/4ouAxsQ==",
+ "requires": {
+ "asn1": "~0.2.3",
+ "assert-plus": "^1.0.0",
+ "bcrypt-pbkdf": "^1.0.0",
+ "dashdash": "^1.12.0",
+ "ecc-jsbn": "~0.1.1",
+ "getpass": "^0.1.1",
+ "jsbn": "~0.1.0",
+ "safer-buffer": "^2.0.2",
+ "tweetnacl": "~0.14.0"
+ }
+ }
}
},
"http-status": {
- "version": "1.5.2",
- "resolved": "https://registry.npmjs.org/http-status/-/http-status-1.5.2.tgz",
- "integrity": "sha512-HzxX+/hV/8US1Gq4V6R6PgUmJ5Pt/DGATs4QhdEOpG8LrdS9/3UG2nnOvkqUpRks04yjVtV5p/NODjO+wvf6vg=="
+ "version": "1.5.3",
+ "resolved": "https://registry.npmjs.org/http-status/-/http-status-1.5.3.tgz",
+ "integrity": "sha512-jCClqdnnwigYslmtfb28vPplOgoiZ0siP2Z8C5Ua+3UKbx410v+c+jT+jh1bbI4TvcEySuX0vd/CfFZFbDkJeQ=="
},
"iconv-lite": {
"version": "0.4.24",
@@ -585,56 +2410,25 @@
}
},
"ieee754": {
- "version": "1.1.13",
- "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.1.13.tgz",
- "integrity": "sha512-4vf7I2LYV/HaWerSo3XmlMkp5eZ83i+/CDluXi/IGTs/O1sejBNhTtnxzmRZfvOUqj7lZjqHkeTvpgSFDlWZTg=="
+ "version": "1.1.13"
},
"inherits": {
- "version": "2.0.4",
- "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz",
- "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ=="
+ "version": "2.0.4"
},
"ini": {
- "version": "2.0.0",
- "resolved": "https://registry.npmjs.org/ini/-/ini-2.0.0.tgz",
- "integrity": "sha512-7PnF4oN3CvZF23ADhA5wRaYEQpJ8qygSkbtTXWBeXWXmEVRXK+1ITciHWwHhsjv1TmW0MgacIv6hEi5pX5NQdA=="
- },
- "ip-address": {
- "version": "6.1.0",
- "resolved": "https://registry.npmjs.org/ip-address/-/ip-address-6.1.0.tgz",
- "integrity": "sha512-u9YYtb1p2fWSbzpKmZ/b3QXWA+diRYPxc2c4y5lFB/MMk5WZ7wNZv8S3CFcIGVJ5XtlaCAl/FQy/D3eQ2XtdOA==",
- "requires": {
- "jsbn": "1.1.0",
- "lodash": "^4.17.15",
- "sprintf-js": "1.1.2"
- },
- "dependencies": {
- "jsbn": {
- "version": "1.1.0",
- "resolved": "https://registry.npmjs.org/jsbn/-/jsbn-1.1.0.tgz",
- "integrity": "sha512-4bYVV3aAMtDTTu4+xsDYa6sy9GyJ69/amsu9sYF2zqjiEoZA5xJi3BrfX3uY+/IekIu7MwdObdbDWpoZdBv3/A=="
- }
- }
+ "version": "2.0.0"
},
"ipaddr.js": {
- "version": "1.9.0",
- "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.0.tgz",
- "integrity": "sha512-M4Sjn6N/+O6/IXSJseKqHoFc+5FdGJ22sXqnjTpdZweHK64MzEPAyQZyEU3R/KRv2GLoa7nNtg/C2Ev6m7z+eA=="
+ "version": "1.9.1"
},
"is-typedarray": {
- "version": "1.0.0",
- "resolved": "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz",
- "integrity": "sha512-cyA56iCMHAh5CdzjJIa4aohJyeO1YbwLi3Jc35MmRU6poroFjIGZzUzupGiRPOjgHg9TLu43xbpwXk523fMxKA=="
+ "version": "1.0.0"
},
"isarray": {
- "version": "1.0.0",
- "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz",
- "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE="
+ "version": "1.0.0"
},
"isstream": {
- "version": "0.1.2",
- "resolved": "https://registry.npmjs.org/isstream/-/isstream-0.1.2.tgz",
- "integrity": "sha512-Yljz7ffyPbrLpLngrMtZ7NduUgVvi6wG9RJ9IUcyCd59YQ911PBJphODUcbOVbqYfxe1wuYf/LJ8PauMRwsM/g=="
+ "version": "0.1.2"
},
"jmespath": {
"version": "0.16.0",
@@ -657,19 +2451,20 @@
"integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg=="
},
"json-stringify-safe": {
- "version": "5.0.1",
- "resolved": "https://registry.npmjs.org/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz",
- "integrity": "sha512-ZClg6AaYvamvYEE82d3Iyd3vSSIjQ+odgjaTzRuO3s7toCdFKczob2i0zCh7JE8kWn17yvAWhUVxvqGwUalsRA=="
+ "version": "5.0.1"
},
"jsonparse": {
- "version": "1.3.1",
- "resolved": "https://registry.npmjs.org/jsonparse/-/jsonparse-1.3.1.tgz",
- "integrity": "sha512-POQXvpdL69+CluYsillJ7SUhKvytYjW9vG/GKpnf+xP8UWgYEM/RaMzHHofbALDiKbbP1W8UEYmgGl39WkPZsg=="
+ "version": "1.3.1"
+ },
+ "JSONStream": {
+ "version": "1.3.5",
+ "requires": {
+ "jsonparse": "^1.2.0",
+ "through": ">=2.2.7 <3"
+ }
},
"jsprim": {
"version": "1.4.2",
- "resolved": "https://registry.npmjs.org/jsprim/-/jsprim-1.4.2.tgz",
- "integrity": "sha512-P2bSOMAc/ciLz6DzgjVlGJP9+BrJWu5UDGK70C2iweC5QBIeFf0ZXRvGjEj2uYgrY2MkAAhsSWHDWlFtEroZWw==",
"requires": {
"assert-plus": "1.0.0",
"extsprintf": "1.3.0",
@@ -683,44 +2478,28 @@
"integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg=="
},
"lodash.escaperegexp": {
- "version": "4.1.2",
- "resolved": "https://registry.npmjs.org/lodash.escaperegexp/-/lodash.escaperegexp-4.1.2.tgz",
- "integrity": "sha512-TM9YBvyC84ZxE3rgfefxUWiQKLilstD6k7PTGt6wfbtXF8ixIJLOL3VYyV/z+ZiPLsVxAsKAFVwWlWeb2Y8Yyw=="
+ "version": "4.1.2"
},
"lodash.groupby": {
- "version": "4.6.0",
- "resolved": "https://registry.npmjs.org/lodash.groupby/-/lodash.groupby-4.6.0.tgz",
- "integrity": "sha512-5dcWxm23+VAoz+awKmBaiBvzox8+RqMgFhi7UvX9DHZr2HdxHXM/Wrf8cfKpsW37RNrvtPn6hSwNqurSILbmJw=="
+ "version": "4.6.0"
},
"lodash.isboolean": {
- "version": "3.0.3",
- "resolved": "https://registry.npmjs.org/lodash.isboolean/-/lodash.isboolean-3.0.3.tgz",
- "integrity": "sha512-Bz5mupy2SVbPHURB98VAcw+aHh4vRV5IPNhILUCsOzRmsTmSQ17jIuqopAentWoehktxGd9e/hbIXq980/1QJg=="
+ "version": "3.0.3"
},
"lodash.isequal": {
- "version": "4.5.0",
- "resolved": "https://registry.npmjs.org/lodash.isequal/-/lodash.isequal-4.5.0.tgz",
- "integrity": "sha512-pDo3lu8Jhfjqls6GkMgpahsF9kCyayhgykjyLMNFTKWrpVdAQtYyB4muAMWozBB4ig/dtWAmsMxLEI8wuz+DYQ=="
+ "version": "4.5.0"
},
"lodash.isfunction": {
- "version": "3.0.9",
- "resolved": "https://registry.npmjs.org/lodash.isfunction/-/lodash.isfunction-3.0.9.tgz",
- "integrity": "sha512-AirXNj15uRIMMPihnkInB4i3NHeb4iBtNg9WRWuK2o31S+ePwwNmDPaTL3o7dTJ+VXNZim7rFs4rxN4YU1oUJw=="
+ "version": "3.0.9"
},
"lodash.isnil": {
- "version": "4.0.0",
- "resolved": "https://registry.npmjs.org/lodash.isnil/-/lodash.isnil-4.0.0.tgz",
- "integrity": "sha512-up2Mzq3545mwVnMhTDMdfoG1OurpA/s5t88JmQX809eH3C8491iu2sfKhTfhQtKY78oPNhiaHJUpT/dUDAAtng=="
+ "version": "4.0.0"
},
"lodash.isundefined": {
- "version": "3.0.1",
- "resolved": "https://registry.npmjs.org/lodash.isundefined/-/lodash.isundefined-3.0.1.tgz",
- "integrity": "sha512-MXB1is3s899/cD8jheYYE2V9qTHwKvt+npCwpD+1Sxm3Q3cECXCiYHjeHWXNwr6Q0SOBPrYUDxendrO6goVTEA=="
+ "version": "3.0.1"
},
"lodash.uniq": {
- "version": "4.5.0",
- "resolved": "https://registry.npmjs.org/lodash.uniq/-/lodash.uniq-4.5.0.tgz",
- "integrity": "sha512-xfBaXQd9ryd9dlSDvnvI0lvxfLJlYAZzXomUYzLKtUeOQvOP5piqAWuGtrhWeqaXK9hhoM/iyJc5AV+XfsX3HQ=="
+ "version": "4.5.0"
},
"lossless-json": {
"version": "1.0.5",
@@ -730,81 +2509,63 @@
"media-typer": {
"version": "0.3.0",
"resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz",
- "integrity": "sha1-hxDXrwqmJvj/+hzgAWhUUmMlV0g="
+ "integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ=="
},
"merge-descriptors": {
- "version": "1.0.1",
- "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.1.tgz",
- "integrity": "sha1-sAqqVW3YtEVoFQ7J0blT8/kMu2E="
+ "version": "1.0.1"
},
"method-missing": {
- "version": "1.2.4",
- "resolved": "https://registry.npmjs.org/method-missing/-/method-missing-1.2.4.tgz",
- "integrity": "sha512-tmj4CKZJVQd/ZuN9hnYD8HBAs/3RdDdqUeJG9RbVYlEZLuPYK4EW+EekMqLsCV4w1HastX+Pk2Ov87OQmeo01A=="
+ "version": "1.2.4"
},
"methods": {
- "version": "1.1.2",
- "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz",
- "integrity": "sha1-VSmk1nZUE07cxSZmVoNbD4Ua/O4="
+ "version": "1.1.2"
},
"mime": {
- "version": "1.6.0",
- "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz",
- "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg=="
- },
- "mime-db": {
- "version": "1.40.0",
- "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.40.0.tgz",
- "integrity": "sha512-jYdeOMPy9vnxEqFRRo6ZvTZ8d9oPb+k18PKoYNYUe2stVEBPPwsln/qWzdbmaIvnhZ9v2P+CuecK+fpUfsV2mA=="
+ "version": "1.6.0"
},
"mime-types": {
- "version": "2.1.24",
- "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.24.tgz",
- "integrity": "sha512-WaFHS3MCl5fapm3oLxU4eYDw77IQM2ACcxQ9RIxfaC3ooc6PFuBMGZZsYpvoXS5D5QTWPieo1jjLdAm3TBP3cQ==",
+ "version": "2.1.35",
"requires": {
- "mime-db": "1.40.0"
+ "mime-db": "1.52.0"
+ },
+ "dependencies": {
+ "mime-db": {
+ "version": "1.52.0",
+ "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz",
+ "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg=="
+ }
}
},
"minimatch": {
"version": "3.1.2",
- "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz",
- "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==",
"requires": {
"brace-expansion": "^1.1.7"
}
},
"minimist": {
- "version": "1.2.6",
- "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.6.tgz",
- "integrity": "sha512-Jsjnk4bw3YJqYzbdyBiNsPWHPfO++UGG749Cxs6peCu5Xg4nrena6OVxOYxrQTqww0Jmwt+Ref8rggumkTLz9Q=="
+ "version": "1.2.7",
+ "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.7.tgz",
+ "integrity": "sha512-bzfL1YUZsP41gmu/qjrEk0Q6i2ix/cVeAhbCbqH9u3zYutS1cLg00qhrD0M2MVdCcx4Sc0UpP2eBWo9rotpq6g=="
},
"ms": {
- "version": "2.0.0",
- "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
- "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g="
- },
- "negotiator": {
- "version": "0.6.2",
- "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.2.tgz",
- "integrity": "sha512-hZXc7K2e+PgeI1eDBe/10Ard4ekbfrrqG8Ep+8Jmf4JID2bNg7NvCPOZN+kfF574pFQI7mum2AUqDidoKqcTOw=="
+ "version": "2.0.0"
},
"oauth-sign": {
- "version": "0.9.0",
- "resolved": "https://registry.npmjs.org/oauth-sign/-/oauth-sign-0.9.0.tgz",
- "integrity": "sha512-fexhUFFPTGV8ybAtSIGbV6gOkSv8UtRbDBnAyLQw4QPKkgNlsH2ByPGtMUqdWkos6YCRmAqViwgZrJc/mRDzZQ=="
+ "version": "0.9.0"
+ },
+ "object-inspect": {
+ "version": "1.13.1",
+ "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.1.tgz",
+ "integrity": "sha512-5qoj1RUiKOMsCCNLV1CBiPYE10sziTsnmNxkAI/rZhiD63CF7IqdFGC/XzjWjpSgLf0LxXX3bDFIh0E18f6UhQ=="
},
"on-finished": {
- "version": "2.3.0",
- "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.3.0.tgz",
- "integrity": "sha1-IPEzZIGwg811M3mSoWlxqi2QaUc=",
+ "version": "2.4.1",
"requires": {
"ee-first": "1.1.1"
}
},
"p-finally": {
- "version": "1.0.0",
- "resolved": "https://registry.npmjs.org/p-finally/-/p-finally-1.0.0.tgz",
- "integrity": "sha512-LICb2p9CB7FS+0eR1oqWnHhp0FljGLZCWBE9aix0Uye9W8LTQPwMTYVGWQWIw9RdQiDg4+epXQODwIYJtSJaow=="
+ "version": "1.0.0"
},
"p-queue": {
"version": "6.6.2",
@@ -813,30 +2574,29 @@
"requires": {
"eventemitter3": "^4.0.4",
"p-timeout": "^3.2.0"
+ },
+ "dependencies": {
+ "eventemitter3": {
+ "version": "4.0.7",
+ "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.7.tgz",
+ "integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw=="
+ }
}
},
"p-timeout": {
"version": "3.2.0",
- "resolved": "https://registry.npmjs.org/p-timeout/-/p-timeout-3.2.0.tgz",
- "integrity": "sha512-rhIwUycgwwKcP9yTOOFK/AKsAopjjCakVqLHePO3CC6Mir1Z99xT+R63jZxAT5lFZLa2inS5h+ZS2GvR99/FBg==",
"requires": {
"p-finally": "^1.0.0"
}
},
"parseurl": {
- "version": "1.3.3",
- "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz",
- "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ=="
+ "version": "1.3.3"
},
"path-to-regexp": {
- "version": "0.1.7",
- "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz",
- "integrity": "sha1-32BBeABfUi8V60SQ5yR6G/qmf4w="
+ "version": "0.1.7"
},
"performance-now": {
- "version": "2.1.0",
- "resolved": "https://registry.npmjs.org/performance-now/-/performance-now-2.1.0.tgz",
- "integrity": "sha512-7EAHlyLHI56VEIdK57uwHdHKIaAGbnXPiw0yWbarQZOKaKpvUIgW0jWRVLiatnM+XXlSwsanIBH/hzGMJulMow=="
+ "version": "2.1.0"
},
"process-nextick-args": {
"version": "2.0.1",
@@ -844,71 +2604,40 @@
"integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag=="
},
"proxy-addr": {
- "version": "2.0.5",
- "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.5.tgz",
- "integrity": "sha512-t/7RxHXPH6cJtP0pRG6smSr9QJidhB+3kXu0KgXnbGYMgzEnUxRQ4/LDdfOwZEMyIh3/xHb8PX3t+lfL9z+YVQ==",
+ "version": "2.0.7",
"requires": {
- "forwarded": "~0.1.2",
- "ipaddr.js": "1.9.0"
+ "forwarded": "0.2.0",
+ "ipaddr.js": "1.9.1"
}
},
- "psl": {
- "version": "1.8.0",
- "resolved": "https://registry.npmjs.org/psl/-/psl-1.8.0.tgz",
- "integrity": "sha512-RIdOzyoavK+hA18OGGWDqUTsCLhtA7IcZ/6NCs4fFJaHBDab+pDDmDIByWFRQJq2Cd7r1OoQxBGKOaztq+hjIQ=="
- },
"punycode": {
- "version": "1.3.2",
- "resolved": "https://registry.npmjs.org/punycode/-/punycode-1.3.2.tgz",
- "integrity": "sha512-RofWgt/7fL5wP1Y7fxE7/EmTLzQVnB0ycyibJ0OOHIlJqTNzglYFxVwETOcIoJqJmpDXJ9xImDv+Fq34F/d4Dw=="
+ "version": "2.1.1"
},
"qs": {
- "version": "6.5.3",
- "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.3.tgz",
- "integrity": "sha512-qxXIEh4pCGfHICj1mAJQ2/2XVZkjCDTcEgfoSQxc/fYivUZxTkk7L3bDBJSoNrEzXI17oUO5Dp07ktqE5KzczA=="
+ "version": "6.11.0",
+ "resolved": "https://registry.npmjs.org/qs/-/qs-6.11.0.tgz",
+ "integrity": "sha512-MvjoMCJwEarSbUYk5O+nmoSzSutSsTwF85zcHPQ9OrlFoZOYIjaqBAJIqIXjptyD5vThxGq52Xu/MaJzRkIk4Q==",
+ "requires": {
+ "side-channel": "^1.0.4"
+ }
},
"querystring": {
- "version": "0.2.0",
- "resolved": "https://registry.npmjs.org/querystring/-/querystring-0.2.0.tgz",
- "integrity": "sha512-X/xY82scca2tau62i9mDyU9K+I+djTMUsvwf7xnUX5GLvVzgJybOJf4Y6o9Zx3oJK/LSXg5tTZBjwzqVPaPO2g=="
+ "version": "0.2.0"
},
"range-parser": {
- "version": "1.2.1",
- "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz",
- "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg=="
+ "version": "1.2.1"
},
"raw-body": {
- "version": "2.4.0",
- "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.4.0.tgz",
- "integrity": "sha512-4Oz8DUIwdvoa5qMJelxipzi/iJIi40O5cGV1wNYp5hvZP8ZN0T+jiNkL0QepXs+EsQ9XJ8ipEDoiH70ySUJP3Q==",
+ "version": "2.5.2",
+ "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.2.tgz",
+ "integrity": "sha512-8zGqypfENjCIqGhgXToC8aB2r7YrBX+AQAfIPs/Mlk+BtPTztOvTS01NRW/3Eh60J+a48lt8qsCzirQ6loCVfA==",
"requires": {
- "bytes": "3.1.0",
- "http-errors": "1.7.2",
+ "bytes": "3.1.2",
+ "http-errors": "2.0.0",
"iconv-lite": "0.4.24",
"unpipe": "1.0.0"
}
},
- "readable-stream": {
- "version": "2.3.7",
- "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz",
- "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==",
- "requires": {
- "core-util-is": "~1.0.0",
- "inherits": "~2.0.3",
- "isarray": "~1.0.0",
- "process-nextick-args": "~2.0.0",
- "safe-buffer": "~5.1.1",
- "string_decoder": "~1.1.1",
- "util-deprecate": "~1.0.1"
- },
- "dependencies": {
- "safe-buffer": {
- "version": "5.1.2",
- "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz",
- "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g=="
- }
- }
- },
"request": {
"version": "2.88.2",
"resolved": "https://registry.npmjs.org/request/-/request-2.88.2.tgz",
@@ -934,6 +2663,18 @@
"tough-cookie": "~2.5.0",
"tunnel-agent": "^0.6.0",
"uuid": "^3.3.2"
+ },
+ "dependencies": {
+ "qs": {
+ "version": "6.5.3",
+ "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.3.tgz",
+ "integrity": "sha512-qxXIEh4pCGfHICj1mAJQ2/2XVZkjCDTcEgfoSQxc/fYivUZxTkk7L3bDBJSoNrEzXI17oUO5Dp07ktqE5KzczA=="
+ },
+ "uuid": {
+ "version": "3.4.0",
+ "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.4.0.tgz",
+ "integrity": "sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A=="
+ }
}
},
"requestretry": {
@@ -947,34 +2688,47 @@
},
"s3-stream-upload": {
"version": "2.0.2",
- "resolved": "https://registry.npmjs.org/s3-stream-upload/-/s3-stream-upload-2.0.2.tgz",
- "integrity": "sha512-hSfGZ4InIUMH29niWCAkcDvmOGwADSy7j2Ktm6+nKI+ub6nPoLOboo1D+Q3mEIutTHu0J4+Sv92J0GOk5hAonQ==",
"requires": {
"buffer-queue": "~1.0.0",
"readable-stream": "^2.3.0"
+ },
+ "dependencies": {
+ "readable-stream": {
+ "version": "2.3.8",
+ "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz",
+ "integrity": "sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==",
+ "requires": {
+ "core-util-is": "~1.0.0",
+ "inherits": "~2.0.3",
+ "isarray": "~1.0.0",
+ "process-nextick-args": "~2.0.0",
+ "safe-buffer": "~5.1.1",
+ "string_decoder": "~1.1.1",
+ "util-deprecate": "~1.0.1"
+ }
+ },
+ "safe-buffer": {
+ "version": "5.1.2",
+ "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz",
+ "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g=="
+ }
}
},
"s3signed": {
"version": "0.1.0",
- "resolved": "https://registry.npmjs.org/s3signed/-/s3signed-0.1.0.tgz",
- "integrity": "sha512-08Jc0+GAaFjXgvl8qQytu6+wVBfcUUyCJDocj5kBUeq9YA+6mAM/6psDNxrg4PVkkLBvAK75mnjlaGckfOtDKA==",
"requires": {
"aws-sdk": "^2.0.4"
}
},
"s3urls": {
"version": "1.5.2",
- "resolved": "https://registry.npmjs.org/s3urls/-/s3urls-1.5.2.tgz",
- "integrity": "sha512-3f4kprxnwAqoiVdR/XFoc997YEt0b6oY1VKrhl+kuWnHaUQ2cVe73TcQaww8geX5FKPuGBHl90xv70q7SlbBew==",
"requires": {
"minimist": "^1.1.0",
"s3signed": "^0.1.0"
}
},
"safe-buffer": {
- "version": "5.2.1",
- "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz",
- "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ=="
+ "version": "5.2.1"
},
"safer-buffer": {
"version": "2.1.2",
@@ -982,9 +2736,7 @@
"integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg=="
},
"sax": {
- "version": "1.2.1",
- "resolved": "https://registry.npmjs.org/sax/-/sax-1.2.1.tgz",
- "integrity": "sha512-8I2a3LovHTOpm7NV5yOyO8IHqgVsfK4+UuySrXU8YXkSRX7k6hCV9b3HrkKCr3nMpgj+0bmocaJJWpvp1oc7ZA=="
+ "version": "1.2.1"
},
"semver": {
"version": "5.7.1",
@@ -992,68 +2744,123 @@
"integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ=="
},
"send": {
- "version": "0.17.1",
- "resolved": "https://registry.npmjs.org/send/-/send-0.17.1.tgz",
- "integrity": "sha512-BsVKsiGcQMFwT8UxypobUKyv7irCNRHk1T0G680vk88yf6LBByGcZJOTJCrTP2xVN6yI+XjPJcNuE3V4fT9sAg==",
+ "version": "0.18.0",
"requires": {
"debug": "2.6.9",
- "depd": "~1.1.2",
- "destroy": "~1.0.4",
+ "depd": "2.0.0",
+ "destroy": "1.2.0",
"encodeurl": "~1.0.2",
"escape-html": "~1.0.3",
"etag": "~1.8.1",
"fresh": "0.5.2",
- "http-errors": "~1.7.2",
+ "http-errors": "2.0.0",
"mime": "1.6.0",
- "ms": "2.1.1",
- "on-finished": "~2.3.0",
+ "ms": "2.1.3",
+ "on-finished": "2.4.1",
"range-parser": "~1.2.1",
- "statuses": "~1.5.0"
+ "statuses": "2.0.1"
},
"dependencies": {
"ms": {
- "version": "2.1.1",
- "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.1.tgz",
- "integrity": "sha512-tgp+dl5cGk28utYktBsrFqA7HKgrhgPsg6Z/EfhWI4gl1Hwq8B/GmY/0oXZ6nF8hDVesS/FpnYaD/kOWhYQvyg=="
+ "version": "2.1.3"
}
}
},
"serve-static": {
- "version": "1.14.1",
- "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.14.1.tgz",
- "integrity": "sha512-JMrvUwE54emCYWlTI+hGrGv5I8dEwmco/00EvkzIIsR7MqrHonbD9pO2MOfFnpFntl7ecpZs+3mW+XbQZu9QCg==",
+ "version": "1.15.0",
"requires": {
"encodeurl": "~1.0.2",
"escape-html": "~1.0.3",
"parseurl": "~1.3.3",
- "send": "0.17.1"
+ "send": "0.18.0"
+ }
+ },
+ "set-function-length": {
+ "version": "1.2.2",
+ "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz",
+ "integrity": "sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==",
+ "requires": {
+ "define-data-property": "^1.1.4",
+ "es-errors": "^1.3.0",
+ "function-bind": "^1.1.2",
+ "get-intrinsic": "^1.2.4",
+ "gopd": "^1.0.1",
+ "has-property-descriptors": "^1.0.2"
+ },
+ "dependencies": {
+ "get-intrinsic": {
+ "version": "1.2.4",
+ "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz",
+ "integrity": "sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==",
+ "requires": {
+ "es-errors": "^1.3.0",
+ "function-bind": "^1.1.2",
+ "has-proto": "^1.0.1",
+ "has-symbols": "^1.0.3",
+ "hasown": "^2.0.0"
+ }
+ }
}
},
"setprototypeof": {
- "version": "1.1.1",
- "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.1.1.tgz",
- "integrity": "sha512-JvdAWfbXeIGaZ9cILp38HntZSFSo3mWg6xGcJJsd+d4aRMOqauag1C63dJfDw7OaMYwEbHMOxEZ1lqVRYP2OAw=="
+ "version": "1.2.0"
+ },
+ "side-channel": {
+ "version": "1.0.6",
+ "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.6.tgz",
+ "integrity": "sha512-fDW/EZ6Q9RiO8eFG8Hj+7u/oW+XrPTIChwCOM2+th2A6OblDtYYIpve9m+KvI9Z4C9qSEXlaGR6bTEYHReuglA==",
+ "requires": {
+ "call-bind": "^1.0.7",
+ "es-errors": "^1.3.0",
+ "get-intrinsic": "^1.2.4",
+ "object-inspect": "^1.13.1"
+ },
+ "dependencies": {
+ "get-intrinsic": {
+ "version": "1.2.4",
+ "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz",
+ "integrity": "sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==",
+ "requires": {
+ "es-errors": "^1.3.0",
+ "function-bind": "^1.1.2",
+ "has-proto": "^1.0.1",
+ "has-symbols": "^1.0.3",
+ "hasown": "^2.0.0"
+ }
+ }
+ }
},
"socks5-client": {
"version": "1.2.8",
- "resolved": "https://registry.npmjs.org/socks5-client/-/socks5-client-1.2.8.tgz",
- "integrity": "sha512-js8WqQ/JjZS3IQwUxRwSThvXzcRIHE8sde8nE5q7nqxiFGb8EoHmNJ9SF2lXqn3ux6pUV3+InH7ng7mANK6XfA==",
"requires": {
"ip-address": "~6.1.0"
+ },
+ "dependencies": {
+ "ip-address": {
+ "version": "6.1.0",
+ "resolved": "https://registry.npmjs.org/ip-address/-/ip-address-6.1.0.tgz",
+ "integrity": "sha512-u9YYtb1p2fWSbzpKmZ/b3QXWA+diRYPxc2c4y5lFB/MMk5WZ7wNZv8S3CFcIGVJ5XtlaCAl/FQy/D3eQ2XtdOA==",
+ "requires": {
+ "jsbn": "1.1.0",
+ "lodash": "^4.17.15",
+ "sprintf-js": "1.1.2"
+ }
+ },
+ "jsbn": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/jsbn/-/jsbn-1.1.0.tgz",
+ "integrity": "sha512-4bYVV3aAMtDTTu4+xsDYa6sy9GyJ69/amsu9sYF2zqjiEoZA5xJi3BrfX3uY+/IekIu7MwdObdbDWpoZdBv3/A=="
+ }
}
},
"socks5-http-client": {
"version": "1.0.4",
- "resolved": "https://registry.npmjs.org/socks5-http-client/-/socks5-http-client-1.0.4.tgz",
- "integrity": "sha512-K16meYkltPtps6yDOqK9Mwlfz+pdD2kQQQ/TCO/gu2AImUmfO6nF2uXX1YWrPs4NCfClQNih19wqLXmuUcZCrA==",
"requires": {
"socks5-client": "~1.2.6"
}
},
"socks5-https-client": {
"version": "1.2.1",
- "resolved": "https://registry.npmjs.org/socks5-https-client/-/socks5-https-client-1.2.1.tgz",
- "integrity": "sha512-FbZ/X/2Xq3DAMhuRA4bnN0jy1QxaPTVPLFvyv6CEj0QDKSTdWp9yRxo1JhqXmWKhPQeJyUMajHJB2UjT43pFcw==",
"requires": {
"socks5-client": "~1.2.3"
}
@@ -1063,26 +2870,8 @@
"resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.1.2.tgz",
"integrity": "sha512-VE0SOVEHCk7Qc8ulkWw3ntAzXuqf7S2lvwQaDLRnUeIEaKNQJzV6BwmLKhOqT61aGhfUMrXeaBk+oDGCzvhcug=="
},
- "sshpk": {
- "version": "1.17.0",
- "resolved": "https://registry.npmjs.org/sshpk/-/sshpk-1.17.0.tgz",
- "integrity": "sha512-/9HIEs1ZXGhSPE8X6Ccm7Nam1z8KcoCqPdI7ecm1N33EzAetWahvQWVqLZtaZQ+IDKX4IyA2o0gBzqIMkAagHQ==",
- "requires": {
- "asn1": "~0.2.3",
- "assert-plus": "^1.0.0",
- "bcrypt-pbkdf": "^1.0.0",
- "dashdash": "^1.12.0",
- "ecc-jsbn": "~0.1.1",
- "getpass": "^0.1.1",
- "jsbn": "~0.1.0",
- "safer-buffer": "^2.0.2",
- "tweetnacl": "~0.14.0"
- }
- },
"statuses": {
- "version": "1.5.0",
- "resolved": "https://registry.npmjs.org/statuses/-/statuses-1.5.0.tgz",
- "integrity": "sha1-Fhx9rBd2Wf2YEfQ3cfqZOBR4Yow="
+ "version": "2.0.1"
},
"string_decoder": {
"version": "1.1.1",
@@ -1100,35 +2889,27 @@
}
},
"through": {
- "version": "2.3.8",
- "resolved": "https://registry.npmjs.org/through/-/through-2.3.8.tgz",
- "integrity": "sha512-w89qg7PI8wAdvX60bMDP+bFoD5Dvhm9oLheFp5O4a2QF0cSBGsBX4qZmadPMvVqlLJBBci+WqGGOAPvcDeNSVg=="
+ "version": "2.3.8"
},
"toidentifier": {
- "version": "1.0.0",
- "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.0.tgz",
- "integrity": "sha512-yaOH/Pk/VEhBWWTlhI+qXxDFXlejDGcQipMlyxda9nthulaxLZUNcUqFxokp0vcYnvteJln5FNQDRrxj3YcbVw=="
+ "version": "1.0.1"
},
"tough-cookie": {
"version": "2.5.0",
- "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-2.5.0.tgz",
- "integrity": "sha512-nlLsUzgm1kfLXSXfRZMc1KLAugd4hqJHDTvc2hDIwS3mZAfMEuMbc03SujMF+GEcpaX/qboeycw6iO8JwVv2+g==",
"requires": {
"psl": "^1.1.28",
"punycode": "^2.1.1"
},
"dependencies": {
- "punycode": {
- "version": "2.1.1",
- "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.1.1.tgz",
- "integrity": "sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A=="
+ "psl": {
+ "version": "1.9.0",
+ "resolved": "https://registry.npmjs.org/psl/-/psl-1.9.0.tgz",
+ "integrity": "sha512-E/ZsdU4HLs/68gYzgGTkMicWTLPdAftJLfJFlLUAAKZGkStNU72sZjT66SnMDVOfOWY/YAoiD7Jxa9iHvngcag=="
}
}
},
"tunnel-agent": {
"version": "0.6.0",
- "resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.6.0.tgz",
- "integrity": "sha512-McnNiV1l8RYeY8tBgEpuodCC1mLUdbSN+CYBL7kJsJNInOP8UjDDEwdk6Mw60vdLLrr5NHKZhMAOSrR2NZuQ+w==",
"requires": {
"safe-buffer": "^5.0.1"
}
@@ -1148,9 +2929,7 @@
}
},
"unpipe": {
- "version": "1.0.0",
- "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz",
- "integrity": "sha1-sr9O6FFKrmFltIF4KdIbLvSZBOw="
+ "version": "1.0.0"
},
"uri-js": {
"version": "4.4.1",
@@ -1158,22 +2937,87 @@
"integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==",
"requires": {
"punycode": "^2.1.0"
- },
- "dependencies": {
- "punycode": {
- "version": "2.1.1",
- "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.1.1.tgz",
- "integrity": "sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A=="
- }
}
},
"url": {
"version": "0.10.3",
- "resolved": "https://registry.npmjs.org/url/-/url-0.10.3.tgz",
- "integrity": "sha512-hzSUW2q06EqL1gKM/a+obYHLIO6ct2hwPuviqTTOcfFVc61UbfJ2Q32+uGL/HCPxKqrdGB5QUwIe7UqlDgwsOQ==",
"requires": {
"punycode": "1.3.2",
"querystring": "0.2.0"
+ },
+ "dependencies": {
+ "punycode": {
+ "version": "1.3.2"
+ }
+ }
+ },
+ "util": {
+ "version": "0.12.5",
+ "resolved": "https://registry.npmjs.org/util/-/util-0.12.5.tgz",
+ "integrity": "sha512-kZf/K6hEIrWHI6XqOFUiiMa+79wE/D8Q+NCNAWclkyg3b4d2k7s0QGepNjiABc+aR3N1PAyHL7p6UcLY6LmrnA==",
+ "requires": {
+ "inherits": "^2.0.3",
+ "is-arguments": "^1.0.4",
+ "is-generator-function": "^1.0.7",
+ "is-typed-array": "^1.1.3",
+ "which-typed-array": "^1.1.2"
+ },
+ "dependencies": {
+ "available-typed-arrays": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/available-typed-arrays/-/available-typed-arrays-1.0.5.tgz",
+ "integrity": "sha512-DMD0KiN46eipeziST1LPP/STfDU0sufISXmjSgvVsoU2tqxctQeASejWcfNtxYKqETM1UxQ8sp2OrSBWpHY6sw=="
+ },
+ "has-tostringtag": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.0.tgz",
+ "integrity": "sha512-kFjcSNhnlGV1kyoGk7OXKSawH5JOb/LzUc5w9B02hOTO0dfFRjbHQKvg1d6cf3HbeUmtU9VbbV3qzZ2Teh97WQ==",
+ "requires": {
+ "has-symbols": "^1.0.2"
+ }
+ },
+ "is-arguments": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/is-arguments/-/is-arguments-1.1.1.tgz",
+ "integrity": "sha512-8Q7EARjzEnKpt/PCD7e1cgUS0a6X8u5tdSiMqXhojOdoV9TsMsiO+9VLC5vAmO8N7/GmXn7yjR8qnA6bVAEzfA==",
+ "requires": {
+ "call-bind": "^1.0.2",
+ "has-tostringtag": "^1.0.0"
+ }
+ },
+ "is-generator-function": {
+ "version": "1.0.10",
+ "resolved": "https://registry.npmjs.org/is-generator-function/-/is-generator-function-1.0.10.tgz",
+ "integrity": "sha512-jsEjy9l3yiXEQ+PsXdmBwEPcOxaXWLspKdplFUVI9vq1iZgIekeC0L167qeu86czQaxed3q/Uzuw0swL0irL8A==",
+ "requires": {
+ "has-tostringtag": "^1.0.0"
+ }
+ },
+ "is-typed-array": {
+ "version": "1.1.10",
+ "resolved": "https://registry.npmjs.org/is-typed-array/-/is-typed-array-1.1.10.tgz",
+ "integrity": "sha512-PJqgEHiWZvMpaFZ3uTc8kHPM4+4ADTlDniuQL7cU/UDA0Ql7F70yGfHph3cLNe+c9toaigv+DFzTJKhc2CtO6A==",
+ "requires": {
+ "available-typed-arrays": "^1.0.5",
+ "call-bind": "^1.0.2",
+ "for-each": "^0.3.3",
+ "gopd": "^1.0.1",
+ "has-tostringtag": "^1.0.0"
+ }
+ },
+ "which-typed-array": {
+ "version": "1.1.9",
+ "resolved": "https://registry.npmjs.org/which-typed-array/-/which-typed-array-1.1.9.tgz",
+ "integrity": "sha512-w9c4xkx6mPidwp7180ckYWfMmvxpjlZuIudNtDf4N/tTAUB8VJbX25qZoAsrtGuYNnGw3pa0AXgbGKRB8/EceA==",
+ "requires": {
+ "available-typed-arrays": "^1.0.5",
+ "call-bind": "^1.0.2",
+ "for-each": "^0.3.3",
+ "gopd": "^1.0.1",
+ "has-tostringtag": "^1.0.0",
+ "is-typed-array": "^1.1.10"
+ }
+ }
}
},
"util-deprecate": {
@@ -1182,43 +3026,38 @@
"integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw=="
},
"utils-merge": {
- "version": "1.0.1",
- "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz",
- "integrity": "sha1-n5VxD1CiZ5R7LMwSR0HBAoQn5xM="
+ "version": "1.0.1"
},
"uuid": {
- "version": "3.3.2",
- "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.3.2.tgz",
- "integrity": "sha512-yXJmeNaw3DnnKAOKJE51sL/ZaYfWJRl1pK9dr19YFCu0ObS231AB1/LbqTKRAQ5kw8A90rA6fr4riOUpTZvQZA=="
+ "version": "8.0.0",
+ "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.0.0.tgz",
+ "integrity": "sha512-jOXGuXZAWdsTH7eZLtyXMqUb9EcWMGZNbL9YcGBJl4MH4nrxHmZJhEHvyLFrkxo+28uLb/NYRcStH48fnD0Vzw=="
},
"vary": {
- "version": "1.1.2",
- "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz",
- "integrity": "sha1-IpnwLG3tMNSllhsLn3RSShj2NPw="
+ "version": "1.1.2"
},
"verror": {
"version": "1.10.0",
- "resolved": "https://registry.npmjs.org/verror/-/verror-1.10.0.tgz",
- "integrity": "sha512-ZZKSmDAEFOijERBLkmYfJ+vmk3w+7hOLYDNkRCuRuMJGEmqYNCNLyBBFwWKVMhfwaEF3WOd0Zlw86U/WC/+nYw==",
"requires": {
"assert-plus": "^1.0.0",
"core-util-is": "1.0.2",
"extsprintf": "^1.2.0"
+ },
+ "dependencies": {
+ "core-util-is": {
+ "version": "1.0.2"
+ }
}
},
"xml2js": {
"version": "0.4.19",
- "resolved": "https://registry.npmjs.org/xml2js/-/xml2js-0.4.19.tgz",
- "integrity": "sha512-esZnJZJOiJR9wWKMyuvSE1y6Dq5LCuJanqhxslH2bxM6duahNZ+HMpCLhBQGZkbX6xRf8x1Y2eJlgt2q3qo49Q==",
"requires": {
"sax": ">=0.6.0",
"xmlbuilder": "~9.0.1"
}
},
"xmlbuilder": {
- "version": "9.0.7",
- "resolved": "https://registry.npmjs.org/xmlbuilder/-/xmlbuilder-9.0.7.tgz",
- "integrity": "sha512-7YXTQc3P2l9+0rjaUbLwMKRhtmwg1M1eDf6nag7urC7pIPYLD9W/jmzQ4ptRSUbodw5S0jfoGTflLemQibSpeQ=="
+ "version": "9.0.7"
}
}
}
diff --git a/package.json b/package.json
index fd2761a59..716d9a161 100644
--- a/package.json
+++ b/package.json
@@ -11,7 +11,7 @@
"async": "^3.2.2",
"aws-sdk": "^2.814.0",
"elasticdump": "^6.84.1",
- "express": "^4.17.1",
+ "express": "^4.19.2",
"json-schema": "^0.4.0",
"minimatch": "^3.0.5",
"minimist": "^1.2.6",
diff --git a/packer/configs/web_wildcard_whitelist b/packer/configs/web_wildcard_whitelist
index c58eeefe8..621dec3d5 100644
--- a/packer/configs/web_wildcard_whitelist
+++ b/packer/configs/web_wildcard_whitelist
@@ -44,4 +44,5 @@
.yahooapis.com
.cloudfront.net
.docker.io
+.blob.core.windows.net
.googleapis.com
diff --git a/tf_files/aws/eks/sample.tfvars b/tf_files/aws/eks/sample.tfvars
index da176e73e..06b4b309b 100644
--- a/tf_files/aws/eks/sample.tfvars
+++ b/tf_files/aws/eks/sample.tfvars
@@ -122,7 +122,7 @@ fips = false
fips_ami_kms = "arn:aws:kms:us-east-1:707767160287:key/mrk-697897f040ef45b0aa3cebf38a916f99"
#This is the FIPS enabled AMI in cdistest account
-fips_enabled_ami = "ami-0de87e3680dcb13ec"
+fips_enabled_ami = "ami-074d352c8e753fc93"
#A list of AZs to be used by EKS nodes
availability_zones = ["us-east-1a", "us-east-1c", "us-east-1d"]
diff --git a/tf_files/aws/eks/variables.tf b/tf_files/aws/eks/variables.tf
index 0dc78a8ab..6adbaad6b 100644
--- a/tf_files/aws/eks/variables.tf
+++ b/tf_files/aws/eks/variables.tf
@@ -162,7 +162,7 @@ variable "fips_ami_kms" {
# This is the FIPS enabled AMI in cdistest account.
variable "fips_enabled_ami" {
- default = "ami-0de87e3680dcb13ec"
+ default = "ami-074d352c8e753fc93"
}
variable "availability_zones" {
diff --git a/tf_files/aws/kubecost/root.tf b/tf_files/aws/kubecost/root.tf
index 261f4419b..aa03555e3 100644
--- a/tf_files/aws/kubecost/root.tf
+++ b/tf_files/aws/kubecost/root.tf
@@ -10,9 +10,10 @@ terraform {
}
locals {
- account_id = data.aws_caller_identity.current.account_id
- region = data.aws_region.current.name
- cur_bucket = var.cur_s3_bucket != "" ? var.cur_s3_bucket : aws_s3_bucket.cur-bucket.0.id
+ account_id = data.aws_caller_identity.current.account_id
+ region = data.aws_region.current.name
+ cur_bucket = var.cur_s3_bucket != "" ? var.cur_s3_bucket : aws_s3_bucket.cur-bucket.0.id
+ report_bucket = var.report_s3_bucket != "" ? var.report_s3_bucket : local.cur_bucket
}
# The Cost and Usage report, create in any configuration
@@ -29,7 +30,7 @@ resource "aws_cur_report_definition" "kubecost-cur" {
report_versioning = "OVERWRITE_REPORT"
}
-# The bucket used by the Cost and Usage report, will be created in master/standalone setup
+# The bucket used by the Cost and Usage report
resource "aws_s3_bucket" "cur-bucket" {
count = var.cur_s3_bucket != "" ? 0 : 1
bucket = "${var.vpc_name}-kubecost-bucket"
@@ -52,7 +53,7 @@ resource "aws_s3_bucket" "cur-bucket" {
}
-# The Policy attached to the Cost and Usage report bucket, Will attach permissions to each for master/slave account and allow permissions to root slave account so SA's can read/write to bucket
+# The Policy attached to the Cost and Usage report bucket
resource "aws_s3_bucket_policy" "cur-bucket-policy" {
count = var.cur_s3_bucket != "" ? 0 : 1
bucket = aws_s3_bucket.cur-bucket[count.index].id
@@ -89,55 +90,11 @@ resource "aws_s3_bucket_policy" "cur-bucket-policy" {
"aws:SourceAccount" = local.account_id
}
}
- },
- {
- Sid = "Stmt1335892150623"
- Effect = "Allow"
- Principal = {
- Service = "billingreports.amazonaws.com"
- }
- Action = ["s3:GetBucketAcl","s3:GetBucketPolicy"]
- Resource = "arn:aws:s3:::${aws_s3_bucket.cur-bucket[count.index].id}"
- Condition = {
- StringEquals = {
- "aws:SourceArn" = "arn:aws:cur:us-east-1:${var.slave_account_id != "" ? var.slave_account_id : local.account_id}}:definition/*"
- "aws:SourceAccount" = var.slave_account_id
- }
- }
- },
- {
- Sid = "Stmt1335892526598"
- Effect = "Allow"
- Principal = {
- Service = "billingreports.amazonaws.com"
- }
- Action = "s3:PutObject"
- Resource = "arn:aws:s3:::${aws_s3_bucket.cur-bucket[count.index].id}/*"
- Condition = {
- StringEquals = {
- "aws:SourceArn" = "arn:aws:cur:us-east-1:${var.slave_account_id != "" ? var.slave_account_id : local.account_id}:definition/*"
- "aws:SourceAccount" = local.account_id
- }
- }
- },
- {
- Sid = "Stmt1335892526597"
- Effect = "Allow"
- Principal = {
- AWS = "arn:aws:iam::${var.slave_account_id != "" ? var.slave_account_id : local.account_id}:root"
- }
- Action = ["s3:GetBucketAcl","s3:GetBucketPolicy","s3:PutObject","s3:ListBucket","s3:GetObject","s3:DeleteObject","s3:PutObjectAcl"]
- Resource = ["arn:aws:s3:::${aws_s3_bucket.cur-bucket[count.index].id}/*","arn:aws:s3:::${aws_s3_bucket.cur-bucket[count.index].id}"]
}
]
})
}
-
-
-
-
-
# An IAM user used to connect kubecost to CUR/Glue/Athena, not used for SA setup
#resource "aws_iam_user" "kubecost-user" {
# name = "${var.vpc_name}-kubecost-user"
@@ -153,7 +110,7 @@ resource "aws_s3_bucket_policy" "cur-bucket-policy" {
# user = aws_iam_user.kubecost-user.name
#}
-# Policy to attach to the user, will attach permissions to terraform created bucket if master/standalone or to specified bucket if slave
+# Policy to attach to the user
resource "aws_iam_policy" "thanos-user-policy" {
name = "${var.vpc_name}-Kubecost-CUR-policy"
path = "/"
@@ -185,16 +142,16 @@ resource "aws_iam_policy" "thanos-user-policy" {
{
Sid = "S3ReadAccessToAwsBillingData"
Effect = "Allow"
- Action = ["s3:Get*","s3:List*"]
+ Action = ["s3:*"]
Resource = ["arn:aws:s3:::${local.cur_bucket}","arn:aws:s3:::${local.cur_bucket}/*"]
}
]
})
}
-# Policy to attach to the user, will attach permissions to terraform created bucket if master/standalone or to specified bucket if slave
-resource "aws_iam_policy" "kubecost-user-policy" {
- name = "${var.vpc_name}-Kubecost-Thanos-policy"
+# Policy to attach to the reports user
+resource "aws_iam_policy" "report-user-policy" {
+ name = "${var.vpc_name}-Kubecost-report-policy"
path = "/"
description = "Policy for Thanos to have access to centralized bucket."
@@ -207,28 +164,13 @@ resource "aws_iam_policy" "kubecost-user-policy" {
Sid = "Statement",
Effect = "Allow",
Action = ["s3:ListBucket","s3:GetObject","s3:DeleteObject","s3:PutObject","s3:PutObjectAcl"],
- Resource = ["arn:aws:s3:::${local.cur_bucket}/*","arn:aws:s3:::${local.cur_bucket}"]
+ Resource = ["arn:aws:s3:::${local.report_bucket}/*","arn:aws:s3:::${local.report_bucket}"]
}
]
})
}
-
-# Policy attachment of the kubecost user policy to the kubecost user
-#resource "aws_iam_user_policy_attachment" "kubecost-user-policy-attachment" {
-# user = aws_iam_user.kubecost-user.name
-# policy_arn = aws_iam_policy.kubecost-user-policy.arn
-#}
-
-
-
-
-
-
-
-
-
-# Role for the glue crawler, used for every configuration, s3 bucket will either be from terraform, or specified master bucket
+# Role for the glue crawler, used for every configuration, s3 bucket will either be from terraform, or specified bucket
resource "aws_iam_role" "glue-crawler-role" {
name = "AWSCURCrawlerComponentFunction-${var.vpc_name}"
managed_policy_arns = ["arn:aws:iam::aws:policy/service-role/AWSGlueServiceRole"]
@@ -326,7 +268,7 @@ resource "aws_iam_role" "cur-initializer-lambda-role" {
}
}
-# Role for the s3 notification lambda, used for every configuration, s3 bucket will either be from terraform, or specified master bucket
+# Role for the s3 notification lambda, used for every configuration, s3 bucket will either be from terraform, or specified bucket
resource "aws_iam_role" "cur-s3-notification-lambda-role" {
name = "AWSS3CURLambdaExecutor-${var.vpc_name}"
@@ -370,7 +312,7 @@ resource "aws_glue_catalog_database" "cur-glue-database" {
name = "athenacurcfn_${var.vpc_name}"
}
-# Glue crawler, used for every configuration, s3 bucket will either be from terraform, or specified master bucket
+# Glue crawler, used for every configuration, s3 bucket will either be from terraform, or specified bucket
resource "aws_glue_crawler" "cur-glue-crawler" {
database_name = aws_glue_catalog_database.cur-glue-database.name
name = "${var.vpc_name}-AWSCURCrawler"
@@ -383,7 +325,7 @@ resource "aws_glue_crawler" "cur-glue-crawler" {
}
}
-# Glue catalog table, used for every configuration, s3 bucket will either be from terraform, or specified master bucket
+# Glue catalog table, used for every configuration, s3 bucket will either be from terraform, or specified bucket
resource "aws_glue_catalog_table" "cur-glue-catalog" {
database_name = aws_glue_catalog_database.cur-glue-database.name
name = "${var.vpc_name}-cost_and_usage_data_status"
@@ -421,7 +363,7 @@ resource "aws_lambda_function" "cur-initializer-lambda" {
runtime = "nodejs12.x"
}
-# permissions for lambda, used for every configuration, s3 bucket will either be from terraform, or specified master bucket
+# permissions for lambda, used for every configuration, s3 bucket will either be from terraform, or specified bucket
resource "aws_lambda_permission" "cur-initializer-lambda-permission" {
action = "lambda:InvokeFunction"
function_name = aws_lambda_function.cur-initializer-lambda.function_name
@@ -445,4 +387,3 @@ resource "aws_lambda_function" "cur-s3-notification-lambda" {
}
}
}
-
diff --git a/tf_files/aws/kubecost/sample.tfvars b/tf_files/aws/kubecost/sample.tfvars
index 540bd88a1..659136efa 100644
--- a/tf_files/aws/kubecost/sample.tfvars
+++ b/tf_files/aws/kubecost/sample.tfvars
@@ -3,12 +3,8 @@
#The name of the VPC to bring these resources up in
vpc_name = ""
-#This is used if the resource is set up as a secondary node
-parent_account_id = ""
-
#The S3 bucket in which to store the generated Cost and Usage report
cur_s3_bucket = ""
-#This is used if the resource is set up as a primary node. It specifies the account ID for the linked secondary node
-slave_account_id = ""
-
+#The S3 bucket in which to store the Kubecost daily reports
+report_s3_bucket = ""
diff --git a/tf_files/aws/kubecost/variables.tf b/tf_files/aws/kubecost/variables.tf
index 786c82083..d1335847a 100644
--- a/tf_files/aws/kubecost/variables.tf
+++ b/tf_files/aws/kubecost/variables.tf
@@ -2,18 +2,10 @@ variable "vpc_name" {
default = ""
}
-# If slave setup
-
-variable "parent_account_id" {
- default = ""
-}
-
variable "cur_s3_bucket" {
default = ""
}
-# If master setup
-
-variable "slave_account_id" {
+variable "report_s3_bucket" {
default = ""
}
diff --git a/tf_files/aws/modules/account-management-logs/cloud.tf b/tf_files/aws/modules/account-management-logs/cloud.tf
index e8dbe191e..2b54389a8 100644
--- a/tf_files/aws/modules/account-management-logs/cloud.tf
+++ b/tf_files/aws/modules/account-management-logs/cloud.tf
@@ -10,7 +10,6 @@
resource "aws_s3_bucket" "management-logs_bucket" {
bucket = "${var.account_name}-management-logs"
- acl = "private"
tags = {
Environment = "${var.account_name}"
diff --git a/tf_files/aws/modules/common-logging/logging.tf b/tf_files/aws/modules/common-logging/logging.tf
index 98103f243..e9d292a4d 100644
--- a/tf_files/aws/modules/common-logging/logging.tf
+++ b/tf_files/aws/modules/common-logging/logging.tf
@@ -3,7 +3,6 @@
resource "aws_s3_bucket" "common_logging_bucket" {
bucket = "${var.common_name}-logging"
- acl = "private"
tags = {
Environment = "${var.common_name}"
diff --git a/tf_files/aws/modules/eks-nodepool/cloud.tf b/tf_files/aws/modules/eks-nodepool/cloud.tf
index 3001ce9a5..1cdedd964 100644
--- a/tf_files/aws/modules/eks-nodepool/cloud.tf
+++ b/tf_files/aws/modules/eks-nodepool/cloud.tf
@@ -162,6 +162,11 @@ resource "aws_iam_role_policy_attachment" "eks-node-AmazonEKS_CNI_Policy" {
role = "${aws_iam_role.eks_node_role.name}"
}
+resource "aws_iam_role_policy_attachment" "eks-node-AmazonEKSCSIDriverPolicy" {
+ policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonEBSCSIDriverPolicy"
+ role = "${aws_iam_role.eks_node_role.name}"
+}
+
resource "aws_iam_role_policy_attachment" "eks-node-AmazonEC2ContainerRegistryReadOnly" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
role = "${aws_iam_role.eks_node_role.name}"
@@ -273,7 +278,7 @@ resource "aws_security_group_rule" "nodes_interpool_communications" {
resource "aws_launch_configuration" "eks_launch_configuration" {
associate_public_ip_address = false
iam_instance_profile = "${aws_iam_instance_profile.eks_node_instance_profile.name}"
- image_id = "${data.aws_ami.eks_worker.id}"
+ image_id = "${var.fips_enabled_ami}"
instance_type = "${var.nodepool_instance_type}"
name_prefix = "eks-${var.vpc_name}-nodepool-${var.nodepool}"
security_groups = ["${aws_security_group.eks_nodes_sg.id}", "${aws_security_group.ssh.id}"]
diff --git a/tf_files/aws/modules/eks-nodepool/data.tf b/tf_files/aws/modules/eks-nodepool/data.tf
index a695b84fd..3d1df3ee0 100644
--- a/tf_files/aws/modules/eks-nodepool/data.tf
+++ b/tf_files/aws/modules/eks-nodepool/data.tf
@@ -22,21 +22,6 @@ data "aws_availability_zones" "available" {
state = "available"
}
-
-# First, let us create a data source to fetch the latest Amazon Machine Image (AMI) that Amazon provides with an
-# EKS compatible Kubernetes baked in.
-
-data "aws_ami" "eks_worker" {
- filter {
- name = "name"
- # values = ["${var.eks_version == "1.10" ? "amazon-eks-node-1.10*" : "amazon-eks-node-1.11*"}"]
- values = ["amazon-eks-node-${var.eks_version}*"]
- }
-
- most_recent = true
- owners = ["602401143452"] # Amazon Account ID
-}
-
#data "aws_eks_cluster" "eks_cluster" {
# name = "${var.vpc_name}"
#}
diff --git a/tf_files/aws/modules/eks-nodepool/variables.tf b/tf_files/aws/modules/eks-nodepool/variables.tf
index 401866f6d..944d56623 100644
--- a/tf_files/aws/modules/eks-nodepool/variables.tf
+++ b/tf_files/aws/modules/eks-nodepool/variables.tf
@@ -79,4 +79,9 @@ variable "activation_id" {
variable "customer_id" {
default = ""
-}
\ No newline at end of file
+}
+
+# This is the FIPS-enabled AMI in the cdistest account.
+variable "fips_enabled_ami" {
+ default = "ami-074d352c8e753fc93"
+}
diff --git a/tf_files/aws/modules/eks/cloud.tf b/tf_files/aws/modules/eks/cloud.tf
index 7d313f0fb..693462b1c 100644
--- a/tf_files/aws/modules/eks/cloud.tf
+++ b/tf_files/aws/modules/eks/cloud.tf
@@ -37,6 +37,7 @@ module "jupyter_pool" {
nodepool_asg_min_size = "${var.jupyter_asg_min_size}"
activation_id = "${var.activation_id}"
customer_id = "${var.customer_id}"
+ fips_enabled_ami = "${local.ami}"
}
module "workflow_pool" {
@@ -62,6 +63,7 @@ module "workflow_pool" {
nodepool_asg_min_size = "${var.workflow_asg_min_size}"
activation_id = "${var.activation_id}"
customer_id = "${var.customer_id}"
+ fips_enabled_ami = "${local.ami}"
}
@@ -398,6 +400,12 @@ resource "aws_iam_policy" "asg_access" {
"autoscaling:DescribeLaunchConfigurations"
],
"Resource": "*"
+ },
+ {
+ "Sid": "VisualEditor0",
+ "Effect": "Allow",
+ "Action": "ec2:CreateTags",
+ "Resource": "arn:aws:ec2:*:${data.aws_caller_identity.current.account_id}:instance/*"
}
]
}
@@ -421,6 +429,11 @@ resource "aws_iam_role_policy_attachment" "eks-node-AmazonEKS_CNI_Policy" {
role = "${aws_iam_role.eks_node_role.name}"
}
+resource "aws_iam_role_policy_attachment" "eks-node-AmazonEKSCSIDriverPolicy" {
+ policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonEBSCSIDriverPolicy"
+ role = "${aws_iam_role.eks_node_role.name}"
+}
+
resource "aws_iam_role_policy_attachment" "eks-node-AmazonEC2ContainerRegistryReadOnly" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
role = "${aws_iam_role.eks_node_role.name}"
diff --git a/tf_files/aws/modules/eks/variables.tf b/tf_files/aws/modules/eks/variables.tf
index 3eefa456c..2d7cfb5ba 100644
--- a/tf_files/aws/modules/eks/variables.tf
+++ b/tf_files/aws/modules/eks/variables.tf
@@ -175,5 +175,5 @@ variable "fips_ami_kms" {
# This is the FIPS enabled AMI in cdistest account.
variable "fips_enabled_ami" {
- default = "ami-0de87e3680dcb13ec"
+ default = "ami-074d352c8e753fc93"
}
diff --git a/tf_files/aws/modules/management-logs/logging.tf b/tf_files/aws/modules/management-logs/logging.tf
index 80b4a7931..ef7db77e8 100644
--- a/tf_files/aws/modules/management-logs/logging.tf
+++ b/tf_files/aws/modules/management-logs/logging.tf
@@ -3,7 +3,6 @@
resource "aws_s3_bucket" "management-logs_bucket" {
bucket = "${var.log_bucket_name}"
- acl = "private"
tags = {
Environment = "ALL"
diff --git a/tf_files/aws/modules/s3-bucket/cloud.tf b/tf_files/aws/modules/s3-bucket/cloud.tf
index 110b8fe4c..cc8cc3fba 100644
--- a/tf_files/aws/modules/s3-bucket/cloud.tf
+++ b/tf_files/aws/modules/s3-bucket/cloud.tf
@@ -16,7 +16,6 @@ module "cdis_s3_logs" {
resource "aws_s3_bucket" "mybucket" {
bucket = "${local.clean_bucket_name}"
- acl = "private"
server_side_encryption_configuration {
rule {
diff --git a/tf_files/aws/modules/s3-logs/cloud.tf b/tf_files/aws/modules/s3-logs/cloud.tf
index 5f7a45705..e4569bd9e 100644
--- a/tf_files/aws/modules/s3-logs/cloud.tf
+++ b/tf_files/aws/modules/s3-logs/cloud.tf
@@ -10,7 +10,6 @@ terraform {
resource "aws_s3_bucket" "log_bucket" {
bucket = "${local.clean_bucket_name}"
- acl = "log-delivery-write"
server_side_encryption_configuration {
rule {
diff --git a/tf_files/aws/modules/upload-data-bucket/s3.tf b/tf_files/aws/modules/upload-data-bucket/s3.tf
index af9cc19cf..041d5184f 100644
--- a/tf_files/aws/modules/upload-data-bucket/s3.tf
+++ b/tf_files/aws/modules/upload-data-bucket/s3.tf
@@ -3,7 +3,6 @@
resource "aws_s3_bucket" "data_bucket" {
bucket = "${var.vpc_name}-data-bucket"
- acl = "private"
server_side_encryption_configuration {
rule {
@@ -56,9 +55,6 @@ resource "aws_s3_bucket_notification" "bucket_notification" {
resource "aws_s3_bucket" "log_bucket" {
bucket = "${var.vpc_name}-data-bucket-logs"
- acl = "bucket-owner-full-control" #log-delivery-write
- acl = "log-delivery-write"
-
server_side_encryption_configuration {
rule {
diff --git a/tf_files/aws/modules/vpn_nlb_central_csoc/cloud.tf b/tf_files/aws/modules/vpn_nlb_central_csoc/cloud.tf
index b2ebdb15a..a0c59f439 100644
--- a/tf_files/aws/modules/vpn_nlb_central_csoc/cloud.tf
+++ b/tf_files/aws/modules/vpn_nlb_central_csoc/cloud.tf
@@ -485,7 +485,6 @@ resource "aws_route53_record" "vpn-nlb" {
resource "aws_s3_bucket" "vpn-certs-and-files" {
bucket = "vpn-certs-and-files-${var.env_vpn_nlb_name}"
- acl = "private"
versioning {
enabled = true
diff --git a/tf_files/aws/nextflow_ami_pipeline/data.tf b/tf_files/aws/nextflow_ami_pipeline/data.tf
new file mode 100644
index 000000000..a8b950b2a
--- /dev/null
+++ b/tf_files/aws/nextflow_ami_pipeline/data.tf
@@ -0,0 +1,24 @@
+data "aws_vpc" "selected" {
+ filter {
+ name = "tag:Name"
+ values = [var.vpc_name]
+ }
+}
+
+data "aws_security_group" "default" {
+ vpc_id = data.aws_vpc.selected.id
+
+ filter {
+ name = "group-name"
+ values = ["default"]
+ }
+}
+
+data "aws_subnet" "private" {
+ vpc_id = data.aws_vpc.selected.id
+
+ filter {
+ name = "tag:Name"
+ values = [var.subnet_name]
+ }
+}
diff --git a/tf_files/aws/nextflow_ami_pipeline/iam.tf b/tf_files/aws/nextflow_ami_pipeline/iam.tf
new file mode 100644
index 000000000..0b3594dd4
--- /dev/null
+++ b/tf_files/aws/nextflow_ami_pipeline/iam.tf
@@ -0,0 +1,36 @@
+## IAM Instance Profile for image builder
+
+resource "aws_iam_role" "image_builder" {
+ name = "EC2InstanceProfileForImageBuilder-nextflow"
+ assume_role_policy = data.aws_iam_policy_document.assume_role.json
+}
+
+data "aws_iam_policy_document" "assume_role" {
+ statement {
+ actions = ["sts:AssumeRole"]
+ principals {
+ type = "Service"
+ identifiers = ["ec2.amazonaws.com"]
+ }
+ }
+}
+
+resource "aws_iam_role_policy_attachment" "amazon_ssm" {
+ role = aws_iam_role.image_builder.name
+ policy_arn = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"
+}
+
+resource "aws_iam_role_policy_attachment" "image_builder" {
+ role = aws_iam_role.image_builder.name
+ policy_arn = "arn:aws:iam::aws:policy/EC2InstanceProfileForImageBuilder"
+}
+
+resource "aws_iam_role_policy_attachment" "image_builder_ecr" {
+ role = aws_iam_role.image_builder.name
+ policy_arn = "arn:aws:iam::aws:policy/EC2InstanceProfileForImageBuilderECRContainerBuilds"
+}
+
+resource "aws_iam_instance_profile" "image_builder" {
+ name = "image-builder-profile"
+ role = aws_iam_role.image_builder.name
+}
diff --git a/tf_files/aws/nextflow_ami_pipeline/imagebuilder.tf b/tf_files/aws/nextflow_ami_pipeline/imagebuilder.tf
new file mode 100644
index 000000000..0c3415003
--- /dev/null
+++ b/tf_files/aws/nextflow_ami_pipeline/imagebuilder.tf
@@ -0,0 +1,161 @@
+## Image Builder component to install the AWS CLI using conda
+
+resource "aws_imagebuilder_component" "install_software" {
+ name = "InstallSoftware"
+ platform = "Linux"
+ version = "1.0.1"
+
+ data = yamlencode({
+ name = "InstallSoftware"
+ description = "Installs bzip2, wget, Miniconda3 and awscli"
+ schemaVersion = 1.0
+
+ phases = [{
+ name = "build"
+ steps = [{
+ name = "InstallPackages"
+ action = "ExecuteBash"
+ inputs = {
+ commands = [
+ "sudo yum install -y bzip2 wget"
+ ]
+ }
+ },
+ {
+ name = "InstallMiniconda"
+ action = "ExecuteBash"
+ inputs = {
+ commands = [
+ "sudo su ec2-user",
+ "mkdir -p /home/ec2-user",
+ "export HOME=/home/ec2-user/",
+ "cd $HOME",
+ "# Download and install miniconda in ec2-user's home dir",
+ "wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda-install.sh",
+ "bash miniconda-install.sh -b -f -p /home/ec2-user/miniconda",
+ "rm miniconda-install.sh"
+ ]
+ }
+ },
+ {
+ name = "InstallAWSCLI"
+ action = "ExecuteBash"
+ inputs = {
+ commands = [
+ "export HOME=/home/ec2-user/",
+ "/home/ec2-user/miniconda/bin/conda install -c conda-forge -y awscli"
+ ]
+ }
+ }]
+ },
+ {
+ name = "validate"
+ steps = [{
+ name = "CheckInstalls"
+ action = "ExecuteBash"
+ inputs = {
+ commands = [
+ "which bzip2",
+ "which wget",
+ "which conda",
+ "/home/ec2-user/miniconda/bin/conda list | grep awscli"
+ ]
+ }
+ }]
+ },
+ {
+ name = "test"
+ steps = [{
+ name = "TestAWSCLI"
+ action = "ExecuteBash"
+ inputs = {
+ commands = [
+ "/home/ec2-user/miniconda/bin/aws --version"
+ ]
+ }
+ }]
+ }]
+ })
+}
+
+
+## Image builder infrastructure config
+resource "aws_imagebuilder_infrastructure_configuration" "image_builder" {
+ name = "nextflow-infra-config"
+ instance_profile_name = aws_iam_instance_profile.image_builder.name
+ security_group_ids = [data.aws_security_group.default.id]
+ subnet_id = data.aws_subnet.private.id
+ terminate_instance_on_failure = true
+}
+
+
+## Make sure the produced AMI is public
+
+resource "aws_imagebuilder_distribution_configuration" "public_ami" {
+ name = "public-ami-distribution"
+
+ distribution {
+ ami_distribution_configuration {
+ name = "gen3-nextflow-{{ imagebuilder:buildDate }}"
+
+ ami_tags = {
+ Role = "Public Image"
+ }
+
+ launch_permission {
+ user_groups = ["all"]
+ }
+ }
+
+ region = "us-east-1"
+ }
+}
+
+
+## Image recipe
+resource "aws_imagebuilder_image_recipe" "recipe" {
+ name = "nextflow-fips-recipe"
+
+ parent_image = var.base_image
+
+ version = "1.0.0"
+
+ block_device_mapping {
+ device_name = "/dev/xvda"
+ ebs {
+ delete_on_termination = true
+ volume_size = 30
+ volume_type = "gp2"
+ encrypted = false
+ }
+ }
+
+ user_data_base64 = try(base64encode(var.user_data), null)
+
+ component {
+ component_arn = "arn:aws:imagebuilder:us-east-1:aws:component/docker-ce-linux/1.0.0/1"
+ }
+
+ component {
+ component_arn = aws_imagebuilder_component.install_software.arn
+ }
+
+
+
+}
+
+
+# Image builder pipeline
+
+resource "aws_imagebuilder_image_pipeline" "nextflow" {
+ image_recipe_arn = aws_imagebuilder_image_recipe.recipe.arn
+ infrastructure_configuration_arn = aws_imagebuilder_infrastructure_configuration.image_builder.arn
+ name = "nextflow-fips"
+
+ distribution_configuration_arn = aws_imagebuilder_distribution_configuration.public_ami.arn
+
+ image_scanning_configuration {
+ image_scanning_enabled = true
+ }
+
+}
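The pipeline above defines no schedule block, so builds only run when triggered. A minimal sketch of starting a build by hand once the pipeline is applied (the account ID below is a placeholder, and the ARN is assumed to follow the standard image-pipeline naming):

# placeholder account ID; substitute the real pipeline ARN from the applied state
aws imagebuilder start-image-pipeline-execution \
    --image-pipeline-arn arn:aws:imagebuilder:us-east-1:123456789012:image-pipeline/nextflow-fips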
diff --git a/tf_files/aws/nextflow_ami_pipeline/manifest.json b/tf_files/aws/nextflow_ami_pipeline/manifest.json
new file mode 100644
index 000000000..62394dc4a
--- /dev/null
+++ b/tf_files/aws/nextflow_ami_pipeline/manifest.json
@@ -0,0 +1,6 @@
+{
+ "terraform": {
+ "module_version" : "0.12"
+ }
+ }
+
\ No newline at end of file
diff --git a/tf_files/aws/nextflow_ami_pipeline/root.tf b/tf_files/aws/nextflow_ami_pipeline/root.tf
new file mode 100644
index 000000000..8ccad5e14
--- /dev/null
+++ b/tf_files/aws/nextflow_ami_pipeline/root.tf
@@ -0,0 +1,7 @@
+# Inject credentials via the AWS_PROFILE environment variable and shared credentials file
+# and/or EC2 metadata service
+terraform {
+ backend "s3" {
+ encrypt = "true"
+ }
+}
\ No newline at end of file
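Since the S3 backend block is left partial, the bucket, key, and region have to be supplied at init time; a minimal sketch (all values are placeholders, and the key shown is only an example):

terraform init \
    -backend-config="bucket=<state-bucket>" \
    -backend-config="key=nextflow_ami_pipeline/terraform.tfstate" \
    -backend-config="region=us-east-1"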
diff --git a/tf_files/aws/nextflow_ami_pipeline/sample.tfvars b/tf_files/aws/nextflow_ami_pipeline/sample.tfvars
new file mode 100644
index 000000000..e6423d359
--- /dev/null
+++ b/tf_files/aws/nextflow_ami_pipeline/sample.tfvars
@@ -0,0 +1 @@
+vpc_name = "devplanetv2"
\ No newline at end of file
diff --git a/tf_files/aws/nextflow_ami_pipeline/variables.tf b/tf_files/aws/nextflow_ami_pipeline/variables.tf
new file mode 100644
index 000000000..58af6430f
--- /dev/null
+++ b/tf_files/aws/nextflow_ami_pipeline/variables.tf
@@ -0,0 +1,28 @@
+variable "vpc_name" {
+ type = string
+}
+
+
+variable "subnet_name" {
+ type = string
+ default = "eks_private_0"
+}
+
+variable "base_image" {
+ type = string
+ default = "arn:aws:imagebuilder:us-east-1:aws:image/amazon-linux-2-ecs-optimized-kernel-5-x86/x.x.x"
+}
+
+variable "user_data" {
+ type = string
+ default = <<EOT
+# install the FIPS dracut modules, logging output (exact install command assumed)
+sudo yum install -y dracut-fips >> /opt/fips-install.log
+sudo dracut -f
+# configure grub
+sudo /sbin/grubby --update-kernel=ALL --args="fips=1"
+EOT
+}
\ No newline at end of file
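On an instance launched from the AMI this pipeline produces, FIPS mode can be confirmed after reboot with a standard kernel check (a generic verification step, not part of this change):

cat /proc/sys/crypto/fips_enabled   # prints 1 when FIPS mode is active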