From f8c50654619a03769360840e863ce0b05a4b82e0 Mon Sep 17 00:00:00 2001
From: burtonk <117617405+k-burt-uch@users.noreply.github.com>
Date: Thu, 5 Dec 2024 09:28:26 -0600
Subject: [PATCH 1/4] feat(BDC-326): Adds gen3-user-data-library (#2674)

---
 .gitignore                                    |  1 +
 gen3/bin/kube-roll-all.sh                     |  6 ++
 gen3/bin/kube-setup-gen3-user-data-library.sh | 70 +++++++++++++++++++
 .../gen3-user-data-library-deploy.yaml        | 70 +++++++++++++++++++
 .../gen3-user-data-library-service.yaml       | 21 ++++++
 .../gen3-user-data-library-service.conf       | 12 ++++
 6 files changed, 180 insertions(+)
 create mode 100644 gen3/bin/kube-setup-gen3-user-data-library.sh
 create mode 100644 kube/services/gen3-user-data-library/gen3-user-data-library-deploy.yaml
 create mode 100644 kube/services/gen3-user-data-library/gen3-user-data-library-service.yaml
 create mode 100644 kube/services/revproxy/gen3.nginx.conf/gen3-user-data-library-service.conf

diff --git a/.gitignore b/.gitignore
index 299bdc807..12870ff28 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,4 @@
+.idea
 Chef/nodes/
 tf_files/*output/*
 tf_files/terraform.tfstate
diff --git a/gen3/bin/kube-roll-all.sh b/gen3/bin/kube-roll-all.sh
index 6357f0788..9334f0cde 100644
--- a/gen3/bin/kube-roll-all.sh
+++ b/gen3/bin/kube-roll-all.sh
@@ -249,6 +249,12 @@ else
   gen3_log_info "not deploying gen3-discovery-ai - no manifest entry for '.versions[\"gen3-discovery-ai\"]'"
 fi
 
+if g3k_manifest_lookup '.versions["gen3-user-data-library"]' 2> /dev/null; then
+  gen3 kube-setup-gen3-user-data-library &
+else
+  gen3_log_info "not deploying gen3-user-data-library - no manifest entry for '.versions[\"gen3-user-data-library\"]'"
+fi
+
 if g3k_manifest_lookup '.versions["ohdsi-atlas"]' && g3k_manifest_lookup '.versions["ohdsi-webapi"]' 2> /dev/null; then
   gen3 kube-setup-ohdsi &
 else
diff --git a/gen3/bin/kube-setup-gen3-user-data-library.sh b/gen3/bin/kube-setup-gen3-user-data-library.sh
new file mode 100644
index 000000000..215142d81
--- /dev/null
+++ b/gen3/bin/kube-setup-gen3-user-data-library.sh
@@ -0,0 +1,70 @@
+#!/bin/bash
+#
+# Deploy the gen3-user-data-library service
+#
+
+source "${GEN3_HOME}/gen3/lib/utils.sh"
+gen3_load "gen3/gen3setup"
+
+setup_database() {
+  gen3_log_info "setting up gen3-user-data-library service ..."
+
+  if g3kubectl describe secret gen3userdatalibrary-g3auto > /dev/null 2>&1; then
+    gen3_log_info "gen3userdatalibrary-g3auto secret already configured"
+    return 0
+  fi
+  if [[ -n "$JENKINS_HOME" || ! -f "$(gen3_secrets_folder)/creds.json" ]]; then
+    gen3_log_err "skipping db setup in non-adminvm environment"
+    return 0
+  fi
+  # Setup .env file that gen3-user-data-library service consumes
+  local secretsFolder="$(gen3_secrets_folder)/g3auto/gen3userdatalibrary"
+  if [[ ! -f "$secretsFolder/gen3-user-data-library.env" || ! -f "$secretsFolder/base64Authz.txt" ]]; then
+    if [[ ! -f "$secretsFolder/dbcreds.json" ]]; then
+      if ! gen3 db setup gen3userdatalibrary; then
+        gen3_log_err "Failed setting up database for gen3-user-data-library service"
+        return 1
+      fi
+    fi
+    if [[ ! -f "$secretsFolder/dbcreds.json" ]]; then
-f "$secretsFolder/dbcreds.json" ]]; then + gen3_log_err "dbcreds not present in Gen3Secrets/" + return 1 + fi + + # go ahead and rotate the password whenever we regen this file + local password="$(gen3 random)" + local db_host=$(jq -r .db_host < "$secretsFolder/dbcreds.json") + local db_user=$(jq -r .db_username < "$secretsFolder/dbcreds.json") + local db_password=$(jq -r .db_password < "$secretsFolder/dbcreds.json") + local db_database=$(jq -r .db_database < "$secretsFolder/dbcreds.json") + cat - > "$secretsFolder/gen3-user-data-library.env" < "$secretsFolder/base64Authz.txt" + fi + gen3 secrets sync 'setup gen3userdatalibrary-g3auto secrets' +} + + +if ! setup_database; then + gen3_log_err "kube-setup-gen3-user-data-library bailing out - database failed setup" + exit 1 +fi + +if ! g3k_manifest_lookup '.versions."gen3-user-data-library"' 2> /dev/null; then + gen3_log_info "kube-setup-gen3-user-data-library exiting - gen3-user-data-library service not in manifest" + exit 0 +fi + +gen3 roll gen3-user-data-library +g3kubectl apply -f "${GEN3_HOME}/kube/services/gen3-user-data-library/gen3-user-data-library-service.yaml" + +if [[ -z "$GEN3_ROLL_ALL" ]]; then + gen3 kube-setup-networkpolicy + gen3 kube-setup-revproxy +fi + +gen3_log_info "The gen3-user-data-library service has been deployed onto the kubernetes cluster" diff --git a/kube/services/gen3-user-data-library/gen3-user-data-library-deploy.yaml b/kube/services/gen3-user-data-library/gen3-user-data-library-deploy.yaml new file mode 100644 index 000000000..00e777ad9 --- /dev/null +++ b/kube/services/gen3-user-data-library/gen3-user-data-library-deploy.yaml @@ -0,0 +1,70 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gen3-user-data-library-deployment +spec: + selector: + # Only select pods based on the 'app' label + matchLabels: + app: gen3-user-data-library + release: production + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + template: + metadata: + labels: + app: gen3-user-data-library + release: production + public: "yes" + netnolimit: "yes" + GEN3_DATE_LABEL + spec: + volumes: + - name: gen3-user-data-library-g3auto-volume + secret: + secretName: gen3userdatalibrary-g3auto + containers: + - name: gen3-user-data-library + GEN3_GEN3-USER-DATA-LIBRARY_IMAGE + ports: + - containerPort: 8080 + env: + - name: GEN3_DEBUG + GEN3_DEBUG_FLAG|-value: "False"-| + - name: ANONYMIZED_TELEMETRY + value: "False" + volumeMounts: + - name: gen3-user-data-library-g3auto-volume + readOnly: true + mountPath: /gen3userdatalibrary/.env + subPath: gen3-user-data-library.env + imagePullPolicy: Always + resources: + requests: + cpu: 1 + memory: 1024Mi + limits: + cpu: 2 + memory: 2048Mi + initContainers: + - name: gen3-user-data-library-db-migrate + GEN3_GEN3-USER-DATA-LIBRARY_IMAGE + imagePullPolicy: Always + volumeMounts: + - name: gen3-user-data-library-g3auto-volume + readOnly: true + mountPath: /gen3userdatalibrary/.env + subPath: gen3-user-data-library.env + resources: + limits: + cpu: 0.8 + memory: 512Mi + command: [ "/bin/sh" ] + args: + - "-c" + - | + # Managing virtual environments via poetry instead of python since the AL base image update, but retaining backwards compatibility + poetry run alembic upgrade head || /env/bin/alembic upgrade head \ No newline at end of file diff --git a/kube/services/gen3-user-data-library/gen3-user-data-library-service.yaml b/kube/services/gen3-user-data-library/gen3-user-data-library-service.yaml new file mode 100644 index 000000000..b3760d66f --- /dev/null +++ 
+++ b/kube/services/gen3-user-data-library/gen3-user-data-library-service.yaml
@@ -0,0 +1,21 @@
+kind: Service
+apiVersion: v1
+metadata:
+  name: gen3-user-data-library-service
+spec:
+  selector:
+    app: gen3-user-data-library
+    release: production
+  ports:
+    - protocol: TCP
+      port: 80
+      targetPort: 8000
+      name: http
+      nodePort: null
+    - protocol: TCP
+      port: 443
+      targetPort: 443
+      name: https
+      nodePort: null
+  type: ClusterIP
+
diff --git a/kube/services/revproxy/gen3.nginx.conf/gen3-user-data-library-service.conf b/kube/services/revproxy/gen3.nginx.conf/gen3-user-data-library-service.conf
new file mode 100644
index 000000000..996aa07f9
--- /dev/null
+++ b/kube/services/revproxy/gen3.nginx.conf/gen3-user-data-library-service.conf
@@ -0,0 +1,12 @@
+  location /library {
+    if ($csrf_check !~ ^ok-\S.+$) {
+      return 403 "failed csrf check";
+    }
+
+    set $proxy_service "gen3-user-data-library-service";
+    set $upstream http://gen3-user-data-library-service$des_domain;
+    rewrite ^/library/(.*) /$1 break;
+    proxy_pass $upstream;
+    proxy_redirect http://$host/ https://$host/library/;
+    client_max_body_size 0;
+  }

From 05aecc07a055def73cf15694a3b8e0aca712e508 Mon Sep 17 00:00:00 2001
From: emalinowski
Date: Mon, 9 Dec 2024 09:32:16 -0600
Subject: [PATCH 2/4] chore(remove-dd): removed references to datadog in cloud-auto (#2676)

* chore(remove-dd): removed references to datadog in cloud-auto

* chore(remove-dd): Added annotation to expected fence result
---
 .secrets.baseline                             | 446 +-----------------
 Docker/jenkins/Jenkins-CI-Worker/Dockerfile   |   2 +-
 .../python3.10-buster/dockerrun.sh            |   7 -
 .../python3.6-alpine3.7/dockerrun.sh          |   7 -
 .../python3.6-buster/dockerrun.sh             |   7 -
 .../python3.9-buster/dockerrun.sh             |   7 -
 gen3/bin/kube-setup-aurora-monitoring.sh      | 167 -------
 gen3/bin/kube-setup-aws-es-proxy.sh           |   3 +-
 gen3/bin/kube-setup-datadog.sh                |  96 ----
 gen3/lib/g3k_manifest.sh                      |   4 +-
 .../expectedFenceResult.yaml                  |  37 +-
 .../expectedSheepdogResult.yaml               |  34 +-
 .../argo-wrapper/argo-wrapper-deploy.yaml     |   1 -
 kube/services/argo/values.yaml                |  14 -
 .../cedar-wrapper/cedar-wrapper-deploy.yaml   |  28 --
 .../cohort-middleware-deploy.yaml             |  29 --
 .../services/datadog/datadog-application.yaml |  27 --
 kube/services/datadog/datadog-namespace.yaml  |   7 -
 kube/services/datadog/datadog_db_user.json    |   4 -
 kube/services/datadog/postgres.yaml           |   8 -
 kube/services/datadog/values.yaml             | 342 --------------
 .../dicom-server/dicom-server-deploy.yaml     |  25 -
 .../dicom-viewer/dicom-viewer-deploy.yaml     |  25 -
 kube/services/fence/fence-deploy.yaml         |  29 --
 kube/services/guppy/guppy-deploy.yaml         |  29 --
 kube/services/hatchery/hatchery-deploy.yaml   |  29 --
 kube/services/indexd/indexd-deploy.yaml       |  29 --
 .../jenkins-worker/jenkins-worker-deploy.yaml |   4 -
 .../jenkins2-worker-deploy.yaml               |   4 -
 .../gen3/services/datadog_netpolicy.yaml      |  25 -
 .../ohif-viewer/ohif-viewer-deploy.yaml       |  24 -
 kube/services/orthanc/orthanc-deploy.yaml     |  24 -
 kube/services/peregrine/peregrine-deploy.yaml |  29 --
 kube/services/pidgin/pidgin-deploy.yaml       |  29 --
 .../presigned-url-fence-deploy.yaml           |  29 --
 kube/services/sheepdog/sheepdog-deploy.yaml   |  29 --
 kube/services/wts/wts-deploy.yaml             |  29 --
 37 files changed, 18 insertions(+), 1651 deletions(-)
 delete mode 100644 gen3/bin/kube-setup-aurora-monitoring.sh
 delete mode 100644 gen3/bin/kube-setup-datadog.sh
 delete mode 100644 kube/services/datadog/datadog-application.yaml
 delete mode 100644 kube/services/datadog/datadog-namespace.yaml
 delete mode 100644 kube/services/datadog/datadog_db_user.json
 delete mode 100644
kube/services/datadog/postgres.yaml delete mode 100644 kube/services/datadog/values.yaml delete mode 100644 kube/services/netpolicy/gen3/services/datadog_netpolicy.yaml diff --git a/.secrets.baseline b/.secrets.baseline index ededd2dff..6cca4f5dd 100644 --- a/.secrets.baseline +++ b/.secrets.baseline @@ -98,6 +98,10 @@ "path": "detect_secrets.filters.common.is_ignored_due_to_verification_policies", "min_level": 2 }, + { + "path": "detect_secrets.filters.gibberish.should_exclude_secret", + "limit": 3.7 + }, { "path": "detect_secrets.filters.heuristic.is_indirect_reference" }, @@ -560,101 +564,6 @@ "line_number": 658 } ], - "gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml": [ - { - "type": "Secret Keyword", - "filename": "gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml", - "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", - "is_verified": false, - "line_number": 71 - }, - { - "type": "Secret Keyword", - "filename": "gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml", - "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", - "is_verified": false, - "line_number": 74 - }, - { - "type": "Secret Keyword", - "filename": "gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml", - "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", - "is_verified": false, - "line_number": 77 - }, - { - "type": "Secret Keyword", - "filename": "gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml", - "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", - "is_verified": false, - "line_number": 87 - }, - { - "type": "Secret Keyword", - "filename": "gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml", - "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", - "is_verified": false, - "line_number": 90 - }, - { - "type": "Secret Keyword", - "filename": "gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml", - "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", - "is_verified": false, - "line_number": 93 - }, - { - "type": "Secret Keyword", - "filename": "gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml", - "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457", - "is_verified": false, - "line_number": 96 - }, - { - "type": "Secret Keyword", - "filename": "gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml", - "hashed_secret": "0849046cdafcdb17f5a4bf5c528430d5e04ad295", - "is_verified": false, - "line_number": 99 - }, - { - "type": "Secret Keyword", - "filename": "gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml", - "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", - "is_verified": false, - "line_number": 102 - } - ], - "gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml": [ - { - "type": "Secret Keyword", - "filename": "gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml", - "hashed_secret": "ec9c944c51e87322de8d22e3ca9e2be1ad8fee0d", - "is_verified": false, - "line_number": 63 - }, - { - "type": "Secret Keyword", - "filename": "gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml", - "hashed_secret": "79496491225eda4a7be9fcddee2825c85b1535cc", - "is_verified": false, - "line_number": 66 - }, - { - "type": "Secret Keyword", - "filename": "gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml", - "hashed_secret": "e43756046ad1763d6946575fed0e05130a154bd2", - "is_verified": false, - "line_number": 72 - }, - { - "type": "Secret Keyword", - "filename": 
"gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml", - "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", - "is_verified": false, - "line_number": 75 - } - ], "gen3/test/secretsTest.sh": [ { "type": "Secret Keyword", @@ -822,22 +731,6 @@ "line_number": 47 } ], - "kube/services/cedar-wrapper/cedar-wrapper-deploy.yaml": [ - { - "type": "Secret Keyword", - "filename": "kube/services/cedar-wrapper/cedar-wrapper-deploy.yaml", - "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", - "is_verified": false, - "line_number": 56 - }, - { - "type": "Secret Keyword", - "filename": "kube/services/cedar-wrapper/cedar-wrapper-deploy.yaml", - "hashed_secret": "5949b79e0c7082dc78d543cde662871a4f8b8913", - "is_verified": false, - "line_number": 59 - } - ], "kube/services/cogwheel/cogwheel-deploy.yaml": [ { "type": "Secret Keyword", @@ -847,15 +740,6 @@ "line_number": 35 } ], - "kube/services/cohort-middleware/cohort-middleware-deploy.yaml": [ - { - "type": "Secret Keyword", - "filename": "kube/services/cohort-middleware/cohort-middleware-deploy.yaml", - "hashed_secret": "bf22f6c4bd03572f1ef593efc3eb1a7e0b6dcab4", - "is_verified": false, - "line_number": 62 - } - ], "kube/services/dashboard/dashboard-deploy.yaml": [ { "type": "Secret Keyword", @@ -911,15 +795,6 @@ "line_number": 79 } ], - "kube/services/dicom-server/dicom-server-deploy.yaml": [ - { - "type": "Secret Keyword", - "filename": "kube/services/dicom-server/dicom-server-deploy.yaml", - "hashed_secret": "706168ac2565a93cceffe2202ac45d3d31c075fb", - "is_verified": false, - "line_number": 41 - } - ], "kube/services/fence/fence-canary-deploy.yaml": [ { "type": "Secret Keyword", @@ -985,71 +860,6 @@ "line_number": 99 } ], - "kube/services/fence/fence-deploy.yaml": [ - { - "type": "Secret Keyword", - "filename": "kube/services/fence/fence-deploy.yaml", - "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", - "is_verified": false, - "line_number": 72 - }, - { - "type": "Secret Keyword", - "filename": "kube/services/fence/fence-deploy.yaml", - "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", - "is_verified": false, - "line_number": 75 - }, - { - "type": "Secret Keyword", - "filename": "kube/services/fence/fence-deploy.yaml", - "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", - "is_verified": false, - "line_number": 78 - }, - { - "type": "Secret Keyword", - "filename": "kube/services/fence/fence-deploy.yaml", - "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", - "is_verified": false, - "line_number": 88 - }, - { - "type": "Secret Keyword", - "filename": "kube/services/fence/fence-deploy.yaml", - "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", - "is_verified": false, - "line_number": 91 - }, - { - "type": "Secret Keyword", - "filename": "kube/services/fence/fence-deploy.yaml", - "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", - "is_verified": false, - "line_number": 94 - }, - { - "type": "Secret Keyword", - "filename": "kube/services/fence/fence-deploy.yaml", - "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457", - "is_verified": false, - "line_number": 97 - }, - { - "type": "Secret Keyword", - "filename": "kube/services/fence/fence-deploy.yaml", - "hashed_secret": "0849046cdafcdb17f5a4bf5c528430d5e04ad295", - "is_verified": false, - "line_number": 100 - }, - { - "type": "Secret Keyword", - "filename": "kube/services/fence/fence-deploy.yaml", - "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", - "is_verified": false, - "line_number": 103 - 
} - ], "kube/services/fenceshib/fenceshib-canary-deploy.yaml": [ { "type": "Secret Keyword", @@ -1344,22 +1154,6 @@ "line_number": 82 } ], - "kube/services/guppy/guppy-deploy.yaml": [ - { - "type": "Secret Keyword", - "filename": "kube/services/guppy/guppy-deploy.yaml", - "hashed_secret": "0db22b31c9add2d3c76743c0ac6fbc99bb8b4761", - "is_verified": false, - "line_number": 66 - }, - { - "type": "Secret Keyword", - "filename": "kube/services/guppy/guppy-deploy.yaml", - "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", - "is_verified": false, - "line_number": 69 - } - ], "kube/services/indexd/indexd-canary-deploy.yaml": [ { "type": "Secret Keyword", @@ -1390,36 +1184,6 @@ "line_number": 71 } ], - "kube/services/indexd/indexd-deploy.yaml": [ - { - "type": "Secret Keyword", - "filename": "kube/services/indexd/indexd-deploy.yaml", - "hashed_secret": "0b701c1fabb6ba47a7d47d455e3696d207014bd3", - "is_verified": false, - "line_number": 64 - }, - { - "type": "Secret Keyword", - "filename": "kube/services/indexd/indexd-deploy.yaml", - "hashed_secret": "aee98a99696237d70b6854ee4c2d9e42bc696039", - "is_verified": false, - "line_number": 67 - }, - { - "type": "Secret Keyword", - "filename": "kube/services/indexd/indexd-deploy.yaml", - "hashed_secret": "bdecca54d39013d43d3b7f05f2927eaa7df375dc", - "is_verified": false, - "line_number": 73 - }, - { - "type": "Secret Keyword", - "filename": "kube/services/indexd/indexd-deploy.yaml", - "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", - "is_verified": false, - "line_number": 76 - } - ], "kube/services/jenkins-ci-worker/jenkins-ci-worker-deploy.yaml": [ { "type": "Secret Keyword", @@ -1436,22 +1200,6 @@ "line_number": 146 } ], - "kube/services/jenkins-worker/jenkins-worker-deploy.yaml": [ - { - "type": "Secret Keyword", - "filename": "kube/services/jenkins-worker/jenkins-worker-deploy.yaml", - "hashed_secret": "c937b6fbb346a51ef679dd02ac5c4863e02bfdbf", - "is_verified": false, - "line_number": 150 - }, - { - "type": "Secret Keyword", - "filename": "kube/services/jenkins-worker/jenkins-worker-deploy.yaml", - "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", - "is_verified": false, - "line_number": 153 - } - ], "kube/services/jenkins/jenkins-deploy.yaml": [ { "type": "Secret Keyword", @@ -1484,22 +1232,6 @@ "line_number": 146 } ], - "kube/services/jenkins2-worker/jenkins2-worker-deploy.yaml": [ - { - "type": "Secret Keyword", - "filename": "kube/services/jenkins2-worker/jenkins2-worker-deploy.yaml", - "hashed_secret": "c937b6fbb346a51ef679dd02ac5c4863e02bfdbf", - "is_verified": false, - "line_number": 146 - }, - { - "type": "Secret Keyword", - "filename": "kube/services/jenkins2-worker/jenkins2-worker-deploy.yaml", - "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", - "is_verified": false, - "line_number": 149 - } - ], "kube/services/jenkins2/jenkins2-deploy.yaml": [ { "type": "Secret Keyword", @@ -2880,24 +2612,6 @@ "line_number": 130 } ], - "kube/services/ohif-viewer/ohif-viewer-deploy.yaml": [ - { - "type": "Secret Keyword", - "filename": "kube/services/ohif-viewer/ohif-viewer-deploy.yaml", - "hashed_secret": "3f87db80519a9ae7d8112f4e0d4cc81441181818", - "is_verified": false, - "line_number": 40 - } - ], - "kube/services/orthanc/orthanc-deploy.yaml": [ - { - "type": "Secret Keyword", - "filename": "kube/services/orthanc/orthanc-deploy.yaml", - "hashed_secret": "3f87db80519a9ae7d8112f4e0d4cc81441181818", - "is_verified": false, - "line_number": 41 - } - ], "kube/services/peregrine/peregrine-canary-deploy.yaml": [ { 
"type": "Secret Keyword", @@ -2928,52 +2642,6 @@ "line_number": 73 } ], - "kube/services/peregrine/peregrine-deploy.yaml": [ - { - "type": "Secret Keyword", - "filename": "kube/services/peregrine/peregrine-deploy.yaml", - "hashed_secret": "6131c35d7eebdbc17a314bef8aac75b87323cff3", - "is_verified": false, - "line_number": 68 - }, - { - "type": "Secret Keyword", - "filename": "kube/services/peregrine/peregrine-deploy.yaml", - "hashed_secret": "ca253d1c9dece2da0d6fb24ded7bdb849a475966", - "is_verified": false, - "line_number": 71 - }, - { - "type": "Secret Keyword", - "filename": "kube/services/peregrine/peregrine-deploy.yaml", - "hashed_secret": "990a3202b5c94aa5e5997e7dc1a218e457f8b8ec", - "is_verified": false, - "line_number": 77 - }, - { - "type": "Secret Keyword", - "filename": "kube/services/peregrine/peregrine-deploy.yaml", - "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", - "is_verified": false, - "line_number": 80 - } - ], - "kube/services/pidgin/pidgin-deploy.yaml": [ - { - "type": "Secret Keyword", - "filename": "kube/services/pidgin/pidgin-deploy.yaml", - "hashed_secret": "49af232c7adfcd54a40202e06261396a757e4ddd", - "is_verified": false, - "line_number": 59 - }, - { - "type": "Secret Keyword", - "filename": "kube/services/pidgin/pidgin-deploy.yaml", - "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", - "is_verified": false, - "line_number": 62 - } - ], "kube/services/portal/portal-deploy.yaml": [ { "type": "Secret Keyword", @@ -3034,71 +2702,6 @@ "line_number": 68 } ], - "kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml": [ - { - "type": "Secret Keyword", - "filename": "kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml", - "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", - "is_verified": false, - "line_number": 74 - }, - { - "type": "Secret Keyword", - "filename": "kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml", - "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", - "is_verified": false, - "line_number": 77 - }, - { - "type": "Secret Keyword", - "filename": "kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml", - "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", - "is_verified": false, - "line_number": 80 - }, - { - "type": "Secret Keyword", - "filename": "kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml", - "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", - "is_verified": false, - "line_number": 90 - }, - { - "type": "Secret Keyword", - "filename": "kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml", - "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", - "is_verified": false, - "line_number": 93 - }, - { - "type": "Secret Keyword", - "filename": "kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml", - "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", - "is_verified": false, - "line_number": 96 - }, - { - "type": "Secret Keyword", - "filename": "kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml", - "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457", - "is_verified": false, - "line_number": 99 - }, - { - "type": "Secret Keyword", - "filename": "kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml", - "hashed_secret": "0849046cdafcdb17f5a4bf5c528430d5e04ad295", - "is_verified": false, - "line_number": 102 - }, - { - "type": "Secret Keyword", - "filename": "kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml", - "hashed_secret": 
"9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", - "is_verified": false, - "line_number": 105 - } - ], "kube/services/qa-dashboard/qa-dashboard-deployment.yaml": [ { "type": "Secret Keyword", @@ -3231,36 +2834,6 @@ "line_number": 70 } ], - "kube/services/sheepdog/sheepdog-deploy.yaml": [ - { - "type": "Secret Keyword", - "filename": "kube/services/sheepdog/sheepdog-deploy.yaml", - "hashed_secret": "ec9c944c51e87322de8d22e3ca9e2be1ad8fee0d", - "is_verified": false, - "line_number": 64 - }, - { - "type": "Secret Keyword", - "filename": "kube/services/sheepdog/sheepdog-deploy.yaml", - "hashed_secret": "79496491225eda4a7be9fcddee2825c85b1535cc", - "is_verified": false, - "line_number": 67 - }, - { - "type": "Secret Keyword", - "filename": "kube/services/sheepdog/sheepdog-deploy.yaml", - "hashed_secret": "e43756046ad1763d6946575fed0e05130a154bd2", - "is_verified": false, - "line_number": 73 - }, - { - "type": "Secret Keyword", - "filename": "kube/services/sheepdog/sheepdog-deploy.yaml", - "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", - "is_verified": false, - "line_number": 76 - } - ], "kube/services/shiny/shiny-deploy.yaml": [ { "type": "Secret Keyword", @@ -3361,15 +2934,6 @@ "line_number": 66 } ], - "kube/services/wts/wts-deploy.yaml": [ - { - "type": "Secret Keyword", - "filename": "kube/services/wts/wts-deploy.yaml", - "hashed_secret": "5de687ae886f19c3cb68d4980e3f2e77cca3db9e", - "is_verified": false, - "line_number": 66 - } - ], "packer/buildAll.sh": [ { "type": "Secret Keyword", @@ -3689,5 +3253,5 @@ } ] }, - "generated_at": "2024-08-27T21:36:15Z" + "generated_at": "2024-11-25T11:39:59Z" } diff --git a/Docker/jenkins/Jenkins-CI-Worker/Dockerfile b/Docker/jenkins/Jenkins-CI-Worker/Dockerfile index 9401e6a4b..ee9a64243 100644 --- a/Docker/jenkins/Jenkins-CI-Worker/Dockerfile +++ b/Docker/jenkins/Jenkins-CI-Worker/Dockerfile @@ -97,7 +97,7 @@ RUN sed -i 's/python3/python3.9/' /usr/bin/lsb_release && \ sed -i 's/python3/python3.9/' /usr/bin/add-apt-repository # install aws cli, poetry, pytest, etc. 
-RUN set -xe && python3.9 -m pip install --upgrade pip setuptools && python3.9 -m pip install awscli --upgrade && python3.9 -m pip install pytest --upgrade && python3.9 -m pip install poetry && python3.9 -m pip install PyYAML --upgrade && python3.9 -m pip install lxml --upgrade && python3.9 -m pip install yq --upgrade && python3.9 -m pip install datadog --upgrade +RUN set -xe && python3.9 -m pip install --upgrade pip setuptools && python3.9 -m pip install awscli --upgrade && python3.9 -m pip install pytest --upgrade && python3.9 -m pip install poetry && python3.9 -m pip install PyYAML --upgrade && python3.9 -m pip install lxml --upgrade && python3.9 -m pip install yq --upgrade # install terraform RUN curl -o /tmp/terraform.zip https://releases.hashicorp.com/terraform/0.11.15/terraform_0.11.15_linux_amd64.zip \ diff --git a/Docker/python-nginx/python3.10-buster/dockerrun.sh b/Docker/python-nginx/python3.10-buster/dockerrun.sh index 583590e36..f8f2e119a 100644 --- a/Docker/python-nginx/python3.10-buster/dockerrun.sh +++ b/Docker/python-nginx/python3.10-buster/dockerrun.sh @@ -86,16 +86,9 @@ if [ -f ./wsgi.py ] && [ "$GEN3_DEBUG" = "True" ]; then printf "\napplication.debug=True\n\n" >> ./wsgi.py fi -if [ -z $DD_ENABLED ]; then ( run uwsgi --ini /etc/uwsgi/uwsgi.ini ) & -else -echo "import=ddtrace.bootstrap.sitecustomize" >> /etc/uwsgi/uwsgi.ini -( - ddtrace-run uwsgi --enable-threads --ini /etc/uwsgi/uwsgi.ini -) & -fi run nginx -g 'daemon off;' wait diff --git a/Docker/python-nginx/python3.6-alpine3.7/dockerrun.sh b/Docker/python-nginx/python3.6-alpine3.7/dockerrun.sh index 4f4f6a6f6..8393ecf5f 100644 --- a/Docker/python-nginx/python3.6-alpine3.7/dockerrun.sh +++ b/Docker/python-nginx/python3.6-alpine3.7/dockerrun.sh @@ -86,16 +86,9 @@ if [ -f ./wsgi.py ] && [ "$GEN3_DEBUG" = "True" ]; then echo -e "\napplication.debug=True\n" >> ./wsgi.py fi -if [[ -z $DD_ENABLED ]]; then ( run uwsgi --ini /etc/uwsgi/uwsgi.ini ) & -else -echo "import=ddtrace.bootstrap.sitecustomize" >> /etc/uwsgi/uwsgi.ini -( - ddtrace-run uwsgi --enable-threads --ini /etc/uwsgi/uwsgi.ini -) & -fi run nginx -g 'daemon off;' wait diff --git a/Docker/python-nginx/python3.6-buster/dockerrun.sh b/Docker/python-nginx/python3.6-buster/dockerrun.sh index 583590e36..f8f2e119a 100644 --- a/Docker/python-nginx/python3.6-buster/dockerrun.sh +++ b/Docker/python-nginx/python3.6-buster/dockerrun.sh @@ -86,16 +86,9 @@ if [ -f ./wsgi.py ] && [ "$GEN3_DEBUG" = "True" ]; then printf "\napplication.debug=True\n\n" >> ./wsgi.py fi -if [ -z $DD_ENABLED ]; then ( run uwsgi --ini /etc/uwsgi/uwsgi.ini ) & -else -echo "import=ddtrace.bootstrap.sitecustomize" >> /etc/uwsgi/uwsgi.ini -( - ddtrace-run uwsgi --enable-threads --ini /etc/uwsgi/uwsgi.ini -) & -fi run nginx -g 'daemon off;' wait diff --git a/Docker/python-nginx/python3.9-buster/dockerrun.sh b/Docker/python-nginx/python3.9-buster/dockerrun.sh index 583590e36..f8f2e119a 100644 --- a/Docker/python-nginx/python3.9-buster/dockerrun.sh +++ b/Docker/python-nginx/python3.9-buster/dockerrun.sh @@ -86,16 +86,9 @@ if [ -f ./wsgi.py ] && [ "$GEN3_DEBUG" = "True" ]; then printf "\napplication.debug=True\n\n" >> ./wsgi.py fi -if [ -z $DD_ENABLED ]; then ( run uwsgi --ini /etc/uwsgi/uwsgi.ini ) & -else -echo "import=ddtrace.bootstrap.sitecustomize" >> /etc/uwsgi/uwsgi.ini -( - ddtrace-run uwsgi --enable-threads --ini /etc/uwsgi/uwsgi.ini -) & -fi run nginx -g 'daemon off;' wait diff --git a/gen3/bin/kube-setup-aurora-monitoring.sh b/gen3/bin/kube-setup-aurora-monitoring.sh deleted file mode 100644 index 
5029a87ca..000000000 --- a/gen3/bin/kube-setup-aurora-monitoring.sh +++ /dev/null @@ -1,167 +0,0 @@ -source "${GEN3_HOME}/gen3/lib/utils.sh" -gen3_load "gen3/gen3setup" - -databaseArray=() -databaseFarmArray=() - -# This function is going to retrieve and return all the top-level entries from creds.json, that has the db items we want. -# This way, we can use this information while we're creating schemas and the like -get_all_dbs() { - databases=$(jq 'to_entries[] | select (.value.db_password) | .key' $(gen3_secrets_folder)/creds.json) - - OLD_IFS=$IFS - IFS=$'\n' databaseArray=($databases) - IFS=$OLD_IFS -} - -get_all_dbs_db_farm() { - databases=$(jq 'to_entries[] | .key' $(gen3_secrets_folder)/g3auto/dbfarm/servers.json) - - OLD_IFS=$IFS - IFS=$'\n' databaseFarmArray=($databases) - IFS=$OLD_IFS -} - -create_new_datadog_user() { - # Generate a new password for the datadog user in psql - datadogPsqlPassword=$(random_alphanumeric) - - # update creds.json - if [ ! -d "$(gen3_secrets_folder)/datadog" ] - then - mkdir "$(gen3_secrets_folder)/datadog" - fi - - if [ ! -s "$(gen3_secrets_folder)/datadog/datadog_db_users" ] - then - echo "{}" > "$(gen3_secrets_folder)/datadog/datadog_db_users.json" - fi - - output=$(jq --arg host "$1" --arg password "$datadogPsqlPassword" '.[$host].datadog_db_password=$password' "$(gen3_secrets_folder)/datadog/datadog_db_users.json") - echo "$output" > "$(gen3_secrets_folder)/datadog/datadog_db_users.json" - - username=$(jq --arg host "$1" 'map(select(.db_host==$host))[0] | .db_username' $(gen3_secrets_folder)/g3auto/dbfarm/servers.json | tr -d '"') - password=$(jq --arg host "$1" 'map(select(.db_host==$host))[0] | .db_password' $(gen3_secrets_folder)/g3auto/dbfarm/servers.json | tr -d '"') - - # Create the Datadog user in the database - if PGPASSWORD=$password psql -h "$1" -U "$username" -c "SELECT 1 FROM pg_roles WHERE rolname='datadog'" | grep -q 1; - then - PGPASSWORD=$password psql -h "$1" -U "$username" -c "ALTER USER datadog WITH password '$datadogPsqlPassword';" - else - PGPASSWORD=$password psql -h "$1" -U "$username" -c "CREATE USER datadog WITH password '$datadogPsqlPassword';" - fi - - echo $datadogPsqlPassword -} - -get_datadog_db_password() { - # Create the Datadog user - datadogPsqlPassword="$(jq --arg host "$1" '.[$host].datadog_db_password' < $(gen3_secrets_folder)/datadog/datadog_db_users.json)" - if [[ -z "$datadogPsqlPassword" ]] - then - datadogPsqlPassword=$(create_new_datadog_user $1) - fi - - echo $datadogPsqlPassword -} - -create_schema_and_function() { - svc=$(echo $1 | tr -d '"') - host=$(jq --arg service "$svc" '.[$service].db_host' $(gen3_secrets_folder)/creds.json | tr -d '"') - database=$(jq --arg service "$svc" '.[$service].db_database' $(gen3_secrets_folder)/creds.json | tr -d '"') - - username=$(jq --arg host "$host" 'map(select(.db_host==$host))[0] | .db_username' $(gen3_secrets_folder)/g3auto/dbfarm/servers.json | tr -d '"') - password=$(jq --arg host "$host" 'map(select(.db_host==$host))[0] | .db_password' $(gen3_secrets_folder)/g3auto/dbfarm/servers.json | tr -d '"') - - ddPass=$(get_datadog_db_password $host) - - PGPASSWORD=$password psql -h $host -U $username -d $database -t < /dev/null -then - gen3_log_info "We detected an ArgoCD application named 'datadog-application,' so we're modifying that" - - patch=$(yq -n --yaml-output --arg confd "$confd" '.spec.source.helm.values = $confd') - - echo "$patch" > /tmp/confd.yaml - - kubectl patch applications.argoproj.io datadog-application --type merge -n argocd --patch-file 
/tmp/confd.yaml - -else - gen3_log_info "We didn't detect an ArgoCD application named 'datadog-application,' so we're going to reinstall the DD Helm chart" - - (cat kube/services/datadog/values.yaml | yq --arg endpoints "$postgresString" --yaml-output '.clusterAgent.confd."postgres.yaml" = $endpoints | .clusterChecksRunner.enabled = true') > $(gen3_secrets_folder)/datadog/datadog_values.yaml - helm repo add datadog https://helm.datadoghq.com --force-update 2> >(grep -v 'This is insecure' >&2) - helm repo update 2> >(grep -v 'This is insecure' >&2) - helm upgrade --install datadog -f "$(gen3_secrets_folder)/datadog/datadog_values.yaml" datadog/datadog -n datadog --version 3.6.4 2> >(grep -v 'This is insecure' >&2) -fi \ No newline at end of file diff --git a/gen3/bin/kube-setup-aws-es-proxy.sh b/gen3/bin/kube-setup-aws-es-proxy.sh index 5a1f5ac0e..d01d1775f 100644 --- a/gen3/bin/kube-setup-aws-es-proxy.sh +++ b/gen3/bin/kube-setup-aws-es-proxy.sh @@ -8,7 +8,6 @@ source "${GEN3_HOME}/gen3/lib/utils.sh" gen3_load "gen3/lib/kube-setup-init" -# Deploy Datadog with argocd if flag is set in the manifest path manifestPath=$(g3k_manifest_path) es7="$(jq -r ".[\"global\"][\"es7\"]" < "$manifestPath" | tr '[:upper:]' '[:lower:]')" esDomain="$(jq -r ".[\"global\"][\"esDomain\"]" < "$manifestPath" | tr '[:upper:]' '[:lower:]')" @@ -140,4 +139,4 @@ POLICY gen3 kube-setup-networkpolicy service aws-es-proxy g3kubectl patch deployment "aws-es-proxy-deployment" -p '{"spec":{"template":{"metadata":{"labels":{"netvpc":"yes"}}}}}' || true fi - fi \ No newline at end of file + fi diff --git a/gen3/bin/kube-setup-datadog.sh b/gen3/bin/kube-setup-datadog.sh deleted file mode 100644 index 3ff5d2e2b..000000000 --- a/gen3/bin/kube-setup-datadog.sh +++ /dev/null @@ -1,96 +0,0 @@ -#!/bin/bash -# - -source "${GEN3_HOME}/gen3/lib/utils.sh" -gen3_load "gen3/gen3setup" -gen3_load "gen3/lib/kube-setup-init" - -# Deploy Datadog with argocd if flag is set in the manifest path -manifestPath=$(g3k_manifest_path) -argocd="$(jq -r ".[\"global\"][\"argocd\"]" < "$manifestPath" | tr '[:upper:]' '[:lower:]')" - -if [[ -n "$JENKINS_HOME" ]]; then - gen3_log_info "Jenkins skipping datadog setup: $JENKINS_HOME" - exit 0 -fi - -ctx="$(g3kubectl config current-context)" -ctxNamespace="$(g3kubectl config view -ojson | jq -r ".contexts | map(select(.name==\"$ctx\")) | .[0] | .context.namespace")" -# only do this if we are running in the default namespace -if [[ "$ctxNamespace" == "default" || "$ctxNamespace" == "null" ]]; then - if (! helm status datadog -n datadog > /dev/null 2>&1 ) || [[ "$1" == "--force" ]]; then - ( # subshell - # clean up any manual deployment - if ( g3kubectl get namespace datadog > /dev/null 2>&1 && ! helm status datadog -n datadog > /dev/null 2>&1); then - gen3_log_info "Deleting old namespace, as it is not deployed via helm" - g3kubectl delete namespace datadog - g3kubectl create namespace datadog - fi - # create namespace if it doesn't exist - if (! 
g3kubectl get namespace datadog > /dev/null 2>&1); then - gen3_log_info "Creating namespace datadog" - g3kubectl create namespace datadog - fi - export KUBECTL_NAMESPACE=datadog - if [[ -f "$(gen3_secrets_folder)/datadog/apikey" ]]; then - if (g3kubectl get secret datadog-agent > /dev/null 2>&1); then - g3kubectl delete secret --namespace datadog datadog-agent - fi - g3kubectl create secret generic --namespace datadog datadog-agent --from-file=api-key="$(gen3_secrets_folder)/datadog/apikey" - else - gen3_log_err "Before you can deploy datadog you need to put your datadog apikey in this file: $(gen3_secrets_folder)/datadog/apikey" - exit 1 - fi - if (! g3kubectl get secret --namespace datadog datadog-agent-cluster-agent > /dev/null 2>&1); then - # random string to secure communication between node-based agents and the cluster agent - TOKEN=$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 32 | head -n 1) - g3kubectl create secret --namespace datadog generic datadog-agent-cluster-agent --from-literal=token="$TOKEN" - fi - helm repo add datadog https://helm.datadoghq.com --force-update 2> >(grep -v 'This is insecure' >&2) - helm repo update 2> >(grep -v 'This is insecure' >&2) - if [ "$argocd" = true ]; then - g3kubectl apply -f "$GEN3_HOME/kube/services/datadog/datadog-application.yaml" --namespace=argocd - else - helm upgrade --install datadog -f "$GEN3_HOME/kube/services/datadog/values.yaml" datadog/datadog -n datadog --version 3.6.4 2> >(grep -v 'This is insecure' >&2) - fi - - # Check the manifest to see if we want to set up database monitoring - # Get the name of the cluster - # Run the command - - if g3k_manifest_lookup .datadog.db_monitoring_enabled &> /dev/null; then - gen3_log_info "Detected that this commons is using database monitoring. Setting that up now." - clusters=$(aws rds describe-db-clusters --query "DBClusters[].DBClusterIdentifier" --output text) - clusterArray=($clusters) - - for i in "${!clusterArray[@]}"; do - echo "$((i+1)). ${clusterArray[i]}" - done - - selected="false" - selection="" - - until [ $selected == "true" ] - do - read -p "Enter the number of the cluster you want to monitor (1-${#clusterArray[@]}): " num - if [[ "$num" =~ ^[0-9]+$ ]] && ((num >= 1 && num <= ${#clusterArray[@]})); then - echo "You entered: $num" - selected="true" - selection=${clusterArray[$num - 1]} - else - echo "Invalid input: $num" - fi - done - - gen3 kube-setup-aurora-monitoring "$selection" - else - gen3_log_info "No database monitoring detected. We're done here." 
- fi - - ) - else - gen3_log_info "kube-setup-datadog exiting - datadog already deployed, use --force to redeploy" - fi -else - gen3_log_info "kube-setup-fluentd exiting - only deploys in default namespace" -fi diff --git a/gen3/lib/g3k_manifest.sh b/gen3/lib/g3k_manifest.sh index d69ef5b99..051d88892 100644 --- a/gen3/lib/g3k_manifest.sh +++ b/gen3/lib/g3k_manifest.sh @@ -250,13 +250,13 @@ g3k_manifest_filter() { kvList+=("$kvKey" "image: $value") kvLabelKey=$(echo "GEN3_${key}_VERSION" | tr '[:lower:]' '[:upper:]') version=$(echo $value | rev | cut -d ':' -f 1 | rev) - kvList+=("$kvLabelKey" "tags.datadoghq.com/version: '$version'") + kvList+=("$kvLabelKey" "version: '$version'") done environment="$(g3k_config_lookup ".global.environment" "$manifestPath")" hostname="$(g3k_config_lookup ".global.hostname" "$manifestPath")" kvEnvKey=$(echo "GEN3_ENV_LABEL" | tr '[:lower:]' '[:upper:]') kvHostKey=$(echo "GEN3_HOSTNAME_LABEL" | tr '[:lower:]' '[:upper:]') - kvList+=("$kvEnvKey" "tags.datadoghq.com/env: $environment") + kvList+=("$kvEnvKey" "env: $environment") kvList+=("$kvHostKey" "hostname: $hostname") for key in $(g3k_config_lookup '. | keys[]' "$manifestPath"); do gen3_log_debug "harvesting key $key" diff --git a/gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml b/gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml index 80538842e..eb9c45e1c 100644 --- a/gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml +++ b/gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml @@ -17,6 +17,9 @@ spec: maxUnavailable: 0 template: metadata: + annotations: + prometheus.io/scrape: "true" + prometheus.io/path: /metrics labels: app: fence release: production @@ -26,9 +29,9 @@ spec: netnolimit: "yes" public: "yes" userhelper: "yes" - tags.datadoghq.com/service: "fence" - tags.datadoghq.com/env: null - tags.datadoghq.com/version: 'master' + service: "fence" + env: null + version: 'master' date: "1579711361" spec: serviceAccountName: fence-sa @@ -113,40 +116,12 @@ spec: - name: fence image: quay.io/cdis/fence:master env: - - name: DD_ENABLED - valueFrom: - configMapKeyRef: - name: manifest-global - key: dd_enabled - optional: true - - name: DD_ENV - valueFrom: - fieldRef: - fieldPath: metadata.labels['tags.datadoghq.com/env'] - - name: DD_SERVICE - valueFrom: - fieldRef: - fieldPath: metadata.labels['tags.datadoghq.com/service'] - - name: DD_VERSION - valueFrom: - fieldRef: - fieldPath: metadata.labels['tags.datadoghq.com/version'] - - name: DD_LOGS_INJECTION - value: "true" - - name: DD_PROFILING_ENABLED - value: "true" - - name: DD_TRACE_SAMPLE_RATE - value: "1" - name: GEN3_UWSGI_TIMEOUT valueFrom: configMapKeyRef: name: manifest-global key: uwsgi-timeout optional: true - - name: DD_AGENT_HOST - valueFrom: - fieldRef: - fieldPath: status.hostIP - name: AWS_STS_REGIONAL_ENDPOINTS value: regional - name: PYTHONPATH diff --git a/gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml b/gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml index 08407ae52..cb77a5808 100644 --- a/gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml +++ b/gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml @@ -23,9 +23,9 @@ spec: public: "yes" # to download dictionary s3: "yes" - tags.datadoghq.com/service: "sheepdog" - tags.datadoghq.com/env: null - tags.datadoghq.com/version: 'master' + service: "sheepdog" + env: null + version: 'master' date: "1522344212" spec: affinity: @@ -96,34 +96,6 @@ spec: env: - name: GEN3_UWSGI_TIMEOUT value: "600" - - 
name: DD_ENABLED - valueFrom: - configMapKeyRef: - name: manifest-global - key: dd_enabled - optional: true - - name: DD_ENV - valueFrom: - fieldRef: - fieldPath: metadata.labels['tags.datadoghq.com/env'] - - name: DD_SERVICE - valueFrom: - fieldRef: - fieldPath: metadata.labels['tags.datadoghq.com/service'] - - name: DD_VERSION - valueFrom: - fieldRef: - fieldPath: metadata.labels['tags.datadoghq.com/version'] - - name: DD_LOGS_INJECTION - value: "true" - - name: DD_PROFILING_ENABLED - value: "true" - - name: DD_TRACE_SAMPLE_RATE - value: "1" - - name: DD_AGENT_HOST - valueFrom: - fieldRef: - fieldPath: status.hostIP - name: DICTIONARY_URL valueFrom: configMapKeyRef: diff --git a/kube/services/argo-wrapper/argo-wrapper-deploy.yaml b/kube/services/argo-wrapper/argo-wrapper-deploy.yaml index 3b9d1b6a2..87caa0c25 100644 --- a/kube/services/argo-wrapper/argo-wrapper-deploy.yaml +++ b/kube/services/argo-wrapper/argo-wrapper-deploy.yaml @@ -18,7 +18,6 @@ spec: metadata: labels: app: argo-wrapper - tags.datadoghq.com/service: "argo-wrapper" netnolimit: "yes" public: "yes" GEN3_ENV_LABEL diff --git a/kube/services/argo/values.yaml b/kube/services/argo/values.yaml index c1e951773..21c271a65 100644 --- a/kube/services/argo/values.yaml +++ b/kube/services/argo/values.yaml @@ -16,20 +16,6 @@ controller: prometheus.io/path: /metrics prometheus.io/port: "9090" - ad.datadoghq.com/controller.checks: | - { - "openmetrics": { - "init_config": {}, - "instances": [ - { - "openmetrics_endpoint": "http://%%host%%:%%port%%/metrics ", - "namespace": "argo", - "metrics": ["*"] - } - ] - } - } - resourceRateLimit: limit: 40 burst: 4 diff --git a/kube/services/cedar-wrapper/cedar-wrapper-deploy.yaml b/kube/services/cedar-wrapper/cedar-wrapper-deploy.yaml index 740e18c91..2d89dc60e 100644 --- a/kube/services/cedar-wrapper/cedar-wrapper-deploy.yaml +++ b/kube/services/cedar-wrapper/cedar-wrapper-deploy.yaml @@ -99,34 +99,6 @@ spec: key: "cedar_api_key.txt" - name: GEN3_DEBUG GEN3_DEBUG_FLAG|-value: "False"-| - - name: DD_ENABLED - valueFrom: - configMapKeyRef: - name: manifest-global - key: dd_enabled - optional: true - - name: DD_ENV - valueFrom: - fieldRef: - fieldPath: metadata.labels['tags.datadoghq.com/env'] - - name: DD_SERVICE - valueFrom: - fieldRef: - fieldPath: metadata.labels['tags.datadoghq.com/service'] - - name: DD_VERSION - valueFrom: - fieldRef: - fieldPath: metadata.labels['tags.datadoghq.com/version'] - - name: DD_LOGS_INJECTION - value: "true" - - name: DD_PROFILING_ENABLED - value: "true" - - name: DD_TRACE_SAMPLE_RATE - value: "1" - - name: DD_AGENT_HOST - valueFrom: - fieldRef: - fieldPath: status.hostIP volumeMounts: - name: "ca-volume" readOnly: true diff --git a/kube/services/cohort-middleware/cohort-middleware-deploy.yaml b/kube/services/cohort-middleware/cohort-middleware-deploy.yaml index c7c411f4c..62b09c494 100644 --- a/kube/services/cohort-middleware/cohort-middleware-deploy.yaml +++ b/kube/services/cohort-middleware/cohort-middleware-deploy.yaml @@ -22,7 +22,6 @@ spec: dbohdsi: "yes" dbomop-data: "yes" public: "yes" - tags.datadoghq.com/service: "cohort-middleware" GEN3_ENV_LABEL GEN3_COHORT-MIDDLEWARE_VERSION GEN3_DATE_LABEL @@ -65,34 +64,6 @@ spec: - name: cohort-middleware GEN3_COHORT-MIDDLEWARE_IMAGE|-image: quay.io/cdis/cohort-middleware:latest-| env: - - name: DD_ENABLED - valueFrom: - configMapKeyRef: - name: manifest-global - key: dd_enabled - optional: true - - name: DD_ENV - valueFrom: - fieldRef: - fieldPath: metadata.labels['tags.datadoghq.com/env'] - - name: DD_SERVICE - 
valueFrom: - fieldRef: - fieldPath: metadata.labels['tags.datadoghq.com/service'] - - name: DD_VERSION - valueFrom: - fieldRef: - fieldPath: metadata.labels['tags.datadoghq.com/version'] - - name: DD_LOGS_INJECTION - value: "true" - - name: DD_PROFILING_ENABLED - value: "true" - - name: DD_TRACE_SAMPLE_RATE - value: "1" - - name: DD_AGENT_HOST - valueFrom: - fieldRef: - fieldPath: status.hostIP - name: GIN_MODE value: "release" - name: PORT diff --git a/kube/services/datadog/datadog-application.yaml b/kube/services/datadog/datadog-application.yaml deleted file mode 100644 index 19e0e1d86..000000000 --- a/kube/services/datadog/datadog-application.yaml +++ /dev/null @@ -1,27 +0,0 @@ -apiVersion: argoproj.io/v1alpha1 -kind: Application -metadata: - name: datadog-application - namespace: argocd -spec: - project: default - sources: - - chart: datadog - repoURL: 'https://helm.datadoghq.com' - targetRevision: 3.6.4 - helm: - valueFiles: - - $values/kube/services/datadog/values.yaml - releaseName: datadog - - repoURL: 'https://github.com/uc-cdis/cloud-automation.git' - targetRevision: master - ref: values - destination: - server: 'https://kubernetes.default.svc' - namespace: datadog - syncPolicy: - automated: - prune: true - selfHeal: true - syncOptions: - - CreateNamespace=true diff --git a/kube/services/datadog/datadog-namespace.yaml b/kube/services/datadog/datadog-namespace.yaml deleted file mode 100644 index 90fe854a3..000000000 --- a/kube/services/datadog/datadog-namespace.yaml +++ /dev/null @@ -1,7 +0,0 @@ -# create datadog namespace -apiVersion: v1 -kind: Namespace -metadata: - name: datadog - labels: - name: datadog diff --git a/kube/services/datadog/datadog_db_user.json b/kube/services/datadog/datadog_db_user.json deleted file mode 100644 index 0eca1be9f..000000000 --- a/kube/services/datadog/datadog_db_user.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "datadog_db_user": "datadog", - "datadog_db_password": null -} \ No newline at end of file diff --git a/kube/services/datadog/postgres.yaml b/kube/services/datadog/postgres.yaml deleted file mode 100644 index f85dc0970..000000000 --- a/kube/services/datadog/postgres.yaml +++ /dev/null @@ -1,8 +0,0 @@ -cluster_check: true -init_config: -instances: - - dbm: true - host: - port: 5432 - username: datadog - password: \ No newline at end of file diff --git a/kube/services/datadog/values.yaml b/kube/services/datadog/values.yaml deleted file mode 100644 index fc0bbab8b..000000000 --- a/kube/services/datadog/values.yaml +++ /dev/null @@ -1,342 +0,0 @@ -# values yaml for datadog -# https://github.com/DataDog/helm-charts/tree/main/charts/datadog - -datadog: - ## dogstatsd configuration - ## ref: https://docs.datadoghq.com/agent/kubernetes/dogstatsd/ - ## To emit custom metrics from your Kubernetes application, use DogStatsD. - dogstatsd: - port: 8125 - useHostPort: true - nonLocalTraffic: true - - #This is used to configure a lot of checks that Datadog does. Normally, we would annotate a service, but since we - #use aurora, we'll have to configure from confd instead - - #Enables Optional Universal Service Monitoring - ## ref: https://docs.datadoghq.com/tracing/universal_service_monitoring/?tab=helm - serviceMonitoring: - enabled: false - - # datadog.apiKeyExistingSecret -- Use existing Secret which stores API key instead of creating a new one. The value should be set with the `api-key` key inside the secret. - ## If set, this parameter takes precedence over "apiKey". 
- apiKeyExistingSecret: "ddgov-apikey" - - # datadog.site -- The site of the Datadog intake to send Agent data to. - # (documentation: https://docs.datadoghq.com/getting_started/site/) - - ## Set to 'datadoghq.com' to send data to the US1 site (default). - ## Set to 'datadoghq.eu' to send data to the EU site. - ## Set to 'us3.datadoghq.com' to send data to the US3 site. - ## Set to 'us5.datadoghq.com' to send data to the US5 site. - ## Set to 'ddog-gov.com' to send data to the US1-FED site. - ## Set to 'ap1.datadoghq.com' to send data to the AP1 site. - site: ddog-gov.com - - # datadog.kubeStateMetricsEnabled -- If true, deploys the kube-state-metrics deployment - ## ref: https://github.com/kubernetes/kube-state-metrics/tree/kube-state-metrics-helm-chart-2.13.2/charts/kube-state-metrics - kubeStateMetricsEnabled: false - - kubeStateMetricsCore: - # datadog.kubeStateMetricsCore.enabled -- Enable the kubernetes_state_core check in the Cluster Agent (Requires Cluster Agent 1.12.0+) - ## ref: https://docs.datadoghq.com/integrations/kubernetes_state_core - enabled: true - - - ## Manage Cluster checks feature - ## ref: https://docs.datadoghq.com/agent/autodiscovery/clusterchecks/ - ## Autodiscovery via Kube Service annotations is automatically enabled - clusterChecks: - # datadog.clusterChecks.enabled -- Enable the Cluster Checks feature on both the cluster-agents and the daemonset - enabled: true - - - ## Enable logs agent and provide custom configs - logs: - # datadog.logs.enabled -- Enables this to activate Datadog Agent log collection - ## ref: https://docs.datadoghq.com/agent/basic_agent_usage/kubernetes/#log-collection-setup - enabled: true - - # datadog.logs.containerCollectAll -- Enable this to allow log collection for all containers - ## ref: https://docs.datadoghq.com/agent/basic_agent_usage/kubernetes/#log-collection-setup - containerCollectAll: true - - # datadog.logs.containerCollectUsingFiles -- Collect logs from files in /var/log/pods instead of using container runtime API - ## It's usually the most efficient way of collecting logs. - ## ref: https://docs.datadoghq.com/agent/basic_agent_usage/kubernetes/#log-collection-setup - containerCollectUsingFiles: true - - ## Enable apm agent and provide custom configs - apm: - # datadog.apm.socketEnabled -- Enable APM over Socket (Unix Socket or windows named pipe) - ## ref: https://docs.datadoghq.com/agent/kubernetes/apm/ - socketEnabled: false - - # datadog.apm.portEnabled -- Enable APM over TCP communication (port 8126 by default) - ## ref: https://docs.datadoghq.com/agent/kubernetes/apm/ - portEnabled: false - - enabled: false - - # datadog.apm.port -- Override the trace Agent port - ## Note: Make sure your client is sending to the same UDP port. - port: 8126 - - - - ## Enable process agent and provide custom configs - processAgent: - # datadog.processAgent.enabled -- Set this to true to enable live process monitoring agent - ## Note: /etc/passwd is automatically mounted to allow username resolution. 
- ## ref: https://docs.datadoghq.com/graphing/infrastructure/process/#kubernetes-daemonset - enabled: true - - # datadog.processAgent.processCollection -- Set this to true to enable process collection in process monitoring agent - ## Requires processAgent.enabled to be set to true to have any effect - processCollection: false - - # datadog.processAgent.stripProcessArguments -- Set this to scrub all arguments from collected processes - ## Requires processAgent.enabled and processAgent.processCollection to be set to true to have any effect - ## ref: https://docs.datadoghq.com/infrastructure/process/?tab=linuxwindows#process-arguments-scrubbing - stripProcessArguments: false - - # datadog.processAgent.processDiscovery -- Enables or disables autodiscovery of integrations - processDiscovery: false - - ## Enable systemProbe agent and provide custom configs - systemProbe: - resources: - requests: - cpu: 100m - memory: 200Mi - limits: - cpu: 100m - memory: 200Mi - - # datadog.systemProbe.debugPort -- Specify the port to expose pprof and expvar for system-probe agent - debugPort: 0 - - # datadog.systemProbe.enableConntrack -- Enable the system-probe agent to connect to the netlink/conntrack subsystem to add NAT information to connection data - ## Ref: http://conntrack-tools.netfilter.org/ - enableConntrack: false - - # datadog.systemProbe.seccomp -- Apply an ad-hoc seccomp profile to the system-probe agent to restrict its privileges - ## Note that this will break `kubectl exec … -c system-probe -- /bin/bash` - seccomp: localhost/system-probe - - # datadog.systemProbe.seccompRoot -- Specify the seccomp profile root directory - seccompRoot: /var/lib/kubelet/seccomp - - # datadog.systemProbe.bpfDebug -- Enable logging for kernel debug - bpfDebug: false - - # datadog.systemProbe.apparmor -- Specify a apparmor profile for system-probe - apparmor: unconfined - - # datadog.systemProbe.enableTCPQueueLength -- Enable the TCP queue length eBPF-based check - enableTCPQueueLength: false - - # datadog.systemProbe.enableOOMKill -- Enable the OOM kill eBPF-based check - enableOOMKill: false - - # datadog.systemProbe.enableRuntimeCompiler -- Enable the runtime compiler for eBPF probes - enableRuntimeCompiler: false - - # datadog.systemProbe.mountPackageManagementDirs -- Enables mounting of specific package management directories when runtime compilation is enabled - mountPackageManagementDirs: [] - ## For runtime compilation to be able to download kernel headers, the host's package management folders - ## must be mounted to the /host directory. For example, for Ubuntu & Debian the following mount would be necessary: - # - name: "apt-config-dir" - # hostPath: /etc/apt - # mountPath: /host/etc/apt - ## If this list is empty, then all necessary package management directories (for all supported OSs) will be mounted. 
- - # datadog.systemProbe.osReleasePath -- Specify the path to your os-release file if you don't want to attempt mounting all `/etc/*-release` file by default - osReleasePath: - - # datadog.systemProbe.runtimeCompilationAssetDir -- Specify a directory for runtime compilation assets to live in - runtimeCompilationAssetDir: /var/tmp/datadog-agent/system-probe - - # datadog.systemProbe.collectDNSStats -- Enable DNS stat collection - collectDNSStats: true - - # datadog.systemProbe.maxTrackedConnections -- the maximum number of tracked connections - maxTrackedConnections: 131072 - - # datadog.systemProbe.conntrackMaxStateSize -- the maximum size of the userspace conntrack cache - conntrackMaxStateSize: 131072 # 2 * maxTrackedConnections by default, per https://github.com/DataDog/datadog-agent/blob/d1c5de31e1bba72dfac459aed5ff9562c3fdcc20/pkg/process/config/config.go#L229 - - # datadog.systemProbe.conntrackInitTimeout -- the time to wait for conntrack to initialize before failing - conntrackInitTimeout: 10s - - orchestratorExplorer: - # datadog.orchestratorExplorer.enabled -- Set this to false to disable the orchestrator explorer - ## This requires processAgent.enabled and clusterAgent.enabled to be set to true - ## ref: TODO - add doc link - enabled: true - - # datadog.orchestratorExplorer.container_scrubbing -- Enable the scrubbing of containers in the kubernetes resource YAML for sensitive information - ## The container scrubbing is taking significant resources during data collection. - ## If you notice that the cluster-agent uses too much CPU in larger clusters - ## turning this option off will improve the situation. - container_scrubbing: - enabled: true - - networkMonitoring: - # datadog.networkMonitoring.enabled -- Enable network performance monitoring - enabled: false - - - ## Enable security agent and provide custom configs - securityAgent: - compliance: - # datadog.securityAgent.compliance.enabled -- Set to true to enable Cloud Security Posture Management (CSPM) - enabled: false - - # datadog.securityAgent.compliance.configMap -- Contains CSPM compliance benchmarks that will be used - configMap: - - # datadog.securityAgent.compliance.checkInterval -- Compliance check run interval - checkInterval: 20m - - runtime: - # datadog.securityAgent.runtime.enabled -- Set to true to enable Cloud Workload Security (CWS) - enabled: false - - policies: - # datadog.securityAgent.runtime.policies.configMap -- Contains CWS policies that will be used - configMap: - - syscallMonitor: - # datadog.securityAgent.runtime.syscallMonitor.enabled -- Set to true to enable the Syscall monitoring (recommended for troubleshooting only) - enabled: false - - - ## Configure prometheus scraping autodiscovery - ## ref: https://docs.datadoghq.com/agent/kubernetes/prometheus/ - prometheusScrape: - # datadog.prometheusScrape.enabled -- Enable autodiscovering pods and services exposing prometheus metrics. - enabled: false - # datadog.prometheusScrape.serviceEndpoints -- Enable generating dedicated checks for service endpoints. - serviceEndpoints: false - # datadog.prometheusScrape.additionalConfigs -- Allows adding advanced openmetrics check configurations with custom discovery rules. 
(Requires Agent version 7.27+) - additionalConfigs: [] - # - - # autodiscovery: - # kubernetes_annotations: - # include: - # custom_include_label: 'true' - # exclude: - # custom_exclude_label: 'true' - # kubernetes_container_names: - # - my-app - # configurations: - # - send_distribution_buckets: true - # timeout: 5 - - containerExcludeLogs: "kube_namespace:logging kube_namespace:argo name:pelican-export* name:job-task" - containerExclude: "kube_namespace:logging kube_namespace:kube-system kube_namespace:kubecost kube_namespace:argo kube_namespace:cortex-xdr kube_namespace:monitoring kube_namespace:datadog" -## This is the Datadog Cluster Agent implementation that handles cluster-wide -## metrics more cleanly, separates concerns for better rbac, and implements -## the external metrics API so you can autoscale HPAs based on datadog metrics -## ref: https://docs.datadoghq.com/agent/kubernetes/cluster/ -clusterAgent: - # clusterAgent.enabled -- Set this to false to disable Datadog Cluster Agent - enabled: true - - ## Define the Datadog Cluster-Agent image to work with - image: - # clusterAgent.image.name -- Cluster Agent image name to use (relative to `registry`) - name: cluster-agent - - # clusterAgent.image.tag -- Cluster Agent image tag to use - # tag: 1.16.0 - - # clusterAgent.image.repository -- Override default registry + image.name for Cluster Agent - repository: - - # clusterAgent.image.pullPolicy -- Cluster Agent image pullPolicy - pullPolicy: IfNotPresent - - # clusterAgent.image.pullSecrets -- Cluster Agent repository pullSecret (ex: specify docker registry credentials) - ## See https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod - pullSecrets: [] - # - name: "" - - - # clusterAgent.replicas -- Specify the of cluster agent replicas, if > 1 it allow the cluster agent to work in HA mode. - replicas: 1 - - ## Provide Cluster Agent Deployment pod(s) RBAC configuration - rbac: - # clusterAgent.rbac.create -- If true, create & use RBAC resources - create: true - - # clusterAgent.rbac.serviceAccountName -- Specify a preexisting ServiceAccount to use if clusterAgent.rbac.create is false - serviceAccountName: default - - # clusterAgent.rbac.serviceAccountAnnotations -- Annotations to add to the ServiceAccount if clusterAgent.rbac.create is true - serviceAccountAnnotations: {} - -agents: - # agents.enabled -- You should keep Datadog DaemonSet enabled! - ## The exceptional case could be a situation when you need to run - ## single Datadog pod per every namespace, but you do not need to - ## re-create a DaemonSet for every non-default namespace install. - ## Note: StatsD and DogStatsD work over UDP, so you may not - ## get guaranteed delivery of the metrics in Datadog-per-namespace setup! 
- # - enabled: true - - # agents.tolerations -- Allow the DaemonSet to schedule on tainted nodes (requires Kubernetes >= 1.6) - tolerations: - - effect: NoSchedule - key: role - operator: Equal - value: jupyter - - - ## Define the Datadog image to work with - image: - # agents.image.name -- Datadog Agent image name to use (relative to `registry`) - ## use "dogstatsd" for Standalone Datadog Agent DogStatsD 7 - name: agent - - # agents.image.tag -- Define the Agent version to use - # tag: 7.32.4 - - # agents.image.tagSuffix -- Suffix to append to Agent tag - ## Ex: - ## jmx to enable jmx fetch collection - ## servercore to get Windows images based on servercore - tagSuffix: "" - - # agents.image.repository -- Override default registry + image.name for Agent - repository: - - # agents.image.doNotCheckTag -- Skip the version<>chart compatibility check - ## By default, the version passed in agents.image.tag is checked - ## for compatibility with the version of the chart. - ## This boolean permits to completely skip this check. - ## This is useful, for example, for custom tags that are not - ## respecting semantic versioning - doNotCheckTag: # false - - # agents.image.pullPolicy -- Datadog Agent image pull policy - pullPolicy: IfNotPresent - - # agents.image.pullSecrets -- Datadog Agent repository pullSecret (ex: specify docker registry credentials) - ## See https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod - pullSecrets: [] - # - name: "" - - ## Provide Daemonset RBAC configuration - rbac: - # agents.rbac.create -- If true, create & use RBAC resources - create: true - - # agents.rbac.serviceAccountName -- Specify a preexisting ServiceAccount to use if agents.rbac.create is false - serviceAccountName: default - - # agents.rbac.serviceAccountAnnotations -- Annotations to add to the ServiceAccount if agents.rbac.create is true - serviceAccountAnnotations: {} diff --git a/kube/services/dicom-server/dicom-server-deploy.yaml b/kube/services/dicom-server/dicom-server-deploy.yaml index 58040e6d4..039189d7c 100644 --- a/kube/services/dicom-server/dicom-server-deploy.yaml +++ b/kube/services/dicom-server/dicom-server-deploy.yaml @@ -42,31 +42,6 @@ spec: containers: - name: dicom-server GEN3_DICOM-SERVER_IMAGE - env: - - name: DD_ENABLED - valueFrom: - configMapKeyRef: - name: manifest-global - key: dd_enabled - optional: true - - name: DD_ENV - valueFrom: - fieldRef: - fieldPath: metadata.labels['tags.datadoghq.com/env'] - - name: DD_SERVICE - valueFrom: - fieldRef: - fieldPath: metadata.labels['tags.datadoghq.com/service'] - - name: DD_VERSION - valueFrom: - fieldRef: - fieldPath: metadata.labels['tags.datadoghq.com/version'] - - name: DD_LOGS_INJECTION - value: "true" - - name: DD_AGENT_HOST - valueFrom: - fieldRef: - fieldPath: status.hostIP imagePullPolicy: Always readinessProbe: httpGet: diff --git a/kube/services/dicom-viewer/dicom-viewer-deploy.yaml b/kube/services/dicom-viewer/dicom-viewer-deploy.yaml index 7cd9b6bbe..1af817bfb 100644 --- a/kube/services/dicom-viewer/dicom-viewer-deploy.yaml +++ b/kube/services/dicom-viewer/dicom-viewer-deploy.yaml @@ -38,31 +38,6 @@ spec: containers: - name: dicom-viewer GEN3_DICOM-VIEWER_IMAGE - env: - - name: DD_ENABLED - valueFrom: - configMapKeyRef: - name: manifest-global - key: dd_enabled - optional: true - - name: DD_ENV - valueFrom: - fieldRef: - fieldPath: metadata.labels['tags.datadoghq.com/env'] - - name: DD_SERVICE - valueFrom: - fieldRef: - fieldPath: metadata.labels['tags.datadoghq.com/service'] - - name: 
DD_VERSION - valueFrom: - fieldRef: - fieldPath: metadata.labels['tags.datadoghq.com/version'] - - name: DD_LOGS_INJECTION - value: "true" - - name: DD_AGENT_HOST - valueFrom: - fieldRef: - fieldPath: status.hostIP imagePullPolicy: Always readinessProbe: httpGet: diff --git a/kube/services/fence/fence-deploy.yaml b/kube/services/fence/fence-deploy.yaml index cf03036df..153b2c626 100644 --- a/kube/services/fence/fence-deploy.yaml +++ b/kube/services/fence/fence-deploy.yaml @@ -29,7 +29,6 @@ spec: netnolimit: "yes" public: "yes" userhelper: "yes" - tags.datadoghq.com/service: "fence" GEN3_ENV_LABEL GEN3_HOSTNAME_LABEL GEN3_FENCE_VERSION @@ -117,40 +116,12 @@ spec: - name: fence GEN3_FENCE_IMAGE env: - - name: DD_ENABLED - valueFrom: - configMapKeyRef: - name: manifest-global - key: dd_enabled - optional: true - - name: DD_ENV - valueFrom: - fieldRef: - fieldPath: metadata.labels['tags.datadoghq.com/env'] - - name: DD_SERVICE - valueFrom: - fieldRef: - fieldPath: metadata.labels['tags.datadoghq.com/service'] - - name: DD_VERSION - valueFrom: - fieldRef: - fieldPath: metadata.labels['tags.datadoghq.com/version'] - - name: DD_LOGS_INJECTION - value: "true" - - name: DD_PROFILING_ENABLED - value: "true" - - name: DD_TRACE_SAMPLE_RATE - value: "1" - name: GEN3_UWSGI_TIMEOUT valueFrom: configMapKeyRef: name: manifest-global key: uwsgi-timeout optional: true - - name: DD_AGENT_HOST - valueFrom: - fieldRef: - fieldPath: status.hostIP - name: AWS_STS_REGIONAL_ENDPOINTS value: regional - name: PYTHONPATH diff --git a/kube/services/guppy/guppy-deploy.yaml b/kube/services/guppy/guppy-deploy.yaml index 1dc6c7da0..57b4dec10 100644 --- a/kube/services/guppy/guppy-deploy.yaml +++ b/kube/services/guppy/guppy-deploy.yaml @@ -19,7 +19,6 @@ spec: app: guppy public: "yes" netnolimit: "yes" - tags.datadoghq.com/service: "guppy" GEN3_GUPPY_VERSION GEN3_ENV_LABEL GEN3_DATE_LABEL @@ -110,34 +109,6 @@ spec: name: manifest-global key: tier_access_limit optional: true - - name: DD_TRACE_ENABLED - valueFrom: - configMapKeyRef: - name: manifest-global - key: dd_enabled - optional: true - - name: DD_ENV - valueFrom: - fieldRef: - fieldPath: metadata.labels['tags.datadoghq.com/env'] - - name: DD_SERVICE - valueFrom: - fieldRef: - fieldPath: metadata.labels['tags.datadoghq.com/service'] - - name: DD_VERSION - valueFrom: - fieldRef: - fieldPath: metadata.labels['tags.datadoghq.com/version'] - - name: DD_LOGS_INJECTION - value: "true" - - name: DD_PROFILING_ENABLED - value: "true" - - name: DD_TRACE_SAMPLE_RATE - value: "1" - - name: DD_TRACE_AGENT_HOSTNAME - valueFrom: - fieldRef: - fieldPath: status.hostIP volumeMounts: - name: guppy-config readOnly: true diff --git a/kube/services/hatchery/hatchery-deploy.yaml b/kube/services/hatchery/hatchery-deploy.yaml index 80e64a582..e24106545 100644 --- a/kube/services/hatchery/hatchery-deploy.yaml +++ b/kube/services/hatchery/hatchery-deploy.yaml @@ -20,7 +20,6 @@ spec: public: "yes" netnolimit: "yes" userhelper: "yes" - tags.datadoghq.com/service: "hatchery" GEN3_HATCHERY_VERSION GEN3_ENV_LABEL GEN3_DATE_LABEL @@ -93,34 +92,6 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace - - name: DD_ENABLED - valueFrom: - configMapKeyRef: - name: manifest-global - key: dd_enabled - optional: true - - name: DD_ENV - valueFrom: - fieldRef: - fieldPath: metadata.labels['tags.datadoghq.com/env'] - - name: DD_SERVICE - valueFrom: - fieldRef: - fieldPath: metadata.labels['tags.datadoghq.com/service'] - - name: DD_VERSION - valueFrom: - fieldRef: - fieldPath: 
metadata.labels['tags.datadoghq.com/version'] - - name: DD_LOGS_INJECTION - value: "true" - - name: DD_PROFILING_ENABLED - value: "true" - - name: DD_TRACE_SAMPLE_RATE - value: "1" - - name: DD_AGENT_HOST - valueFrom: - fieldRef: - fieldPath: status.hostIP - name: PRISMA_ACCESS_KEY_ID valueFrom: secretKeyRef: diff --git a/kube/services/indexd/indexd-deploy.yaml b/kube/services/indexd/indexd-deploy.yaml index af60e9b4a..76f672e60 100644 --- a/kube/services/indexd/indexd-deploy.yaml +++ b/kube/services/indexd/indexd-deploy.yaml @@ -23,7 +23,6 @@ spec: app: indexd release: production public: "yes" - tags.datadoghq.com/service: "indexd" GEN3_ENV_LABEL GEN3_INDEXD_VERSION GEN3_DATE_LABEL @@ -78,34 +77,6 @@ spec: - name: indexd GEN3_INDEXD_IMAGE env: - - name: DD_ENABLED - valueFrom: - configMapKeyRef: - name: manifest-global - key: dd_enabled - optional: true - - name: DD_ENV - valueFrom: - fieldRef: - fieldPath: metadata.labels['tags.datadoghq.com/env'] - - name: DD_SERVICE - valueFrom: - fieldRef: - fieldPath: metadata.labels['tags.datadoghq.com/service'] - - name: DD_VERSION - valueFrom: - fieldRef: - fieldPath: metadata.labels['tags.datadoghq.com/version'] - - name: DD_LOGS_INJECTION - value: "true" - - name: DD_PROFILING_ENABLED - value: "true" - - name: DD_TRACE_SAMPLE_RATE - value: "1" - - name: DD_AGENT_HOST - valueFrom: - fieldRef: - fieldPath: status.hostIP - name: GEN3_DEBUG GEN3_DEBUG_FLAG|-value: "False"-| - name: DIST diff --git a/kube/services/jenkins-worker/jenkins-worker-deploy.yaml b/kube/services/jenkins-worker/jenkins-worker-deploy.yaml index aea836a4f..16582b147 100644 --- a/kube/services/jenkins-worker/jenkins-worker-deploy.yaml +++ b/kube/services/jenkins-worker/jenkins-worker-deploy.yaml @@ -118,10 +118,6 @@ spec: secretKeyRef: name: jenkins-g3auto key: google_app_creds.json - - name: DD_AGENT_HOST - valueFrom: - fieldRef: - fieldPath: status.hostIP resources: limits: cpu: "0.6" diff --git a/kube/services/jenkins2-worker/jenkins2-worker-deploy.yaml b/kube/services/jenkins2-worker/jenkins2-worker-deploy.yaml index 5646e8bc2..8c5768bda 100644 --- a/kube/services/jenkins2-worker/jenkins2-worker-deploy.yaml +++ b/kube/services/jenkins2-worker/jenkins2-worker-deploy.yaml @@ -115,10 +115,6 @@ spec: secretKeyRef: name: jenkins-g3auto key: google_app_creds.json - - name: DD_AGENT_HOST - valueFrom: - fieldRef: - fieldPath: status.hostIP resources: limits: cpu: 0.6 diff --git a/kube/services/netpolicy/gen3/services/datadog_netpolicy.yaml b/kube/services/netpolicy/gen3/services/datadog_netpolicy.yaml deleted file mode 100644 index 87b71392f..000000000 --- a/kube/services/netpolicy/gen3/services/datadog_netpolicy.yaml +++ /dev/null @@ -1,25 +0,0 @@ -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: netpolicy-datadog -spec: - podSelector: - matchExpressions: - - key: app - operator: In - values: - - cohort-middleware - ingress: - - from: - - ipBlock: - cidr: 0.0.0.0/0 - ports: - - port: 8126 - egress: - - to: - - namespaceSelector: - matchLabels: - app: datadog - policyTypes: - - Ingress - - Egress \ No newline at end of file diff --git a/kube/services/ohif-viewer/ohif-viewer-deploy.yaml b/kube/services/ohif-viewer/ohif-viewer-deploy.yaml index e2df93cd0..93c3b1f5e 100644 --- a/kube/services/ohif-viewer/ohif-viewer-deploy.yaml +++ b/kube/services/ohif-viewer/ohif-viewer-deploy.yaml @@ -42,30 +42,6 @@ spec: - name: ohif-viewer GEN3_OHIF-VIEWER_IMAGE env: - - name: DD_ENABLED - valueFrom: - configMapKeyRef: - name: manifest-global - key: dd_enabled - optional: 
true - - name: DD_ENV - valueFrom: - fieldRef: - fieldPath: metadata.labels['tags.datadoghq.com/env'] - - name: DD_SERVICE - valueFrom: - fieldRef: - fieldPath: metadata.labels['tags.datadoghq.com/service'] - - name: DD_VERSION - valueFrom: - fieldRef: - fieldPath: metadata.labels['tags.datadoghq.com/version'] - - name: DD_LOGS_INJECTION - value: "true" - - name: DD_AGENT_HOST - valueFrom: - fieldRef: - fieldPath: status.hostIP - name: PORT value: "8080" - name: PUBLIC_URL diff --git a/kube/services/orthanc/orthanc-deploy.yaml b/kube/services/orthanc/orthanc-deploy.yaml index c04c74205..47450de45 100644 --- a/kube/services/orthanc/orthanc-deploy.yaml +++ b/kube/services/orthanc/orthanc-deploy.yaml @@ -43,30 +43,6 @@ spec: - name: orthanc GEN3_ORTHANC_IMAGE env: - - name: DD_ENABLED - valueFrom: - configMapKeyRef: - name: manifest-global - key: dd_enabled - optional: true - - name: DD_ENV - valueFrom: - fieldRef: - fieldPath: metadata.labels['tags.datadoghq.com/env'] - - name: DD_SERVICE - valueFrom: - fieldRef: - fieldPath: metadata.labels['tags.datadoghq.com/service'] - - name: DD_VERSION - valueFrom: - fieldRef: - fieldPath: metadata.labels['tags.datadoghq.com/version'] - - name: DD_LOGS_INJECTION - value: "true" - - name: DD_AGENT_HOST - valueFrom: - fieldRef: - fieldPath: status.hostIP - name: DICOM_WEB_PLUGIN_ENABLED value: "true" - name: TCIA_PLUGIN_ENABLED diff --git a/kube/services/peregrine/peregrine-deploy.yaml b/kube/services/peregrine/peregrine-deploy.yaml index 6467fe325..a331934e1 100644 --- a/kube/services/peregrine/peregrine-deploy.yaml +++ b/kube/services/peregrine/peregrine-deploy.yaml @@ -25,7 +25,6 @@ spec: public: "yes" # to download dictionary s3: "yes" - tags.datadoghq.com/service: "peregrine" GEN3_ENV_LABEL GEN3_PEREGRINE_VERSION GEN3_DATE_LABEL @@ -87,34 +86,6 @@ spec: env: - name: GEN3_UWSGI_TIMEOUT value: "600" - - name: DD_ENABLED - valueFrom: - configMapKeyRef: - name: manifest-global - key: dd_enabled - optional: true - - name: DD_ENV - valueFrom: - fieldRef: - fieldPath: metadata.labels['tags.datadoghq.com/env'] - - name: DD_SERVICE - valueFrom: - fieldRef: - fieldPath: metadata.labels['tags.datadoghq.com/service'] - - name: DD_VERSION - valueFrom: - fieldRef: - fieldPath: metadata.labels['tags.datadoghq.com/version'] - - name: DD_LOGS_INJECTION - value: "true" - - name: DD_PROFILING_ENABLED - value: "true" - - name: DD_TRACE_SAMPLE_RATE - value: "1" - - name: DD_AGENT_HOST - valueFrom: - fieldRef: - fieldPath: status.hostIP - name: DICTIONARY_URL valueFrom: configMapKeyRef: diff --git a/kube/services/pidgin/pidgin-deploy.yaml b/kube/services/pidgin/pidgin-deploy.yaml index 8448f66f9..8f4855a45 100644 --- a/kube/services/pidgin/pidgin-deploy.yaml +++ b/kube/services/pidgin/pidgin-deploy.yaml @@ -19,7 +19,6 @@ spec: app: pidgin public: "yes" netnolimit: "yes" - tags.datadoghq.com/service: "pidgin" GEN3_ENV_LABEL GEN3_PIDGIN_VERSION GEN3_DATE_LABEL @@ -64,34 +63,6 @@ spec: - name: pidgin GEN3_PIDGIN_IMAGE env: - - name: DD_ENABLED - valueFrom: - configMapKeyRef: - name: manifest-global - key: dd_enabled - optional: true - - name: DD_ENV - valueFrom: - fieldRef: - fieldPath: metadata.labels['tags.datadoghq.com/env'] - - name: DD_SERVICE - valueFrom: - fieldRef: - fieldPath: metadata.labels['tags.datadoghq.com/service'] - - name: DD_VERSION - valueFrom: - fieldRef: - fieldPath: metadata.labels['tags.datadoghq.com/version'] - - name: DD_LOGS_INJECTION - value: "true" - - name: DD_PROFILING_ENABLED - value: "true" - - name: DD_TRACE_SAMPLE_RATE - value: "1" - - 
name: DD_AGENT_HOST - valueFrom: - fieldRef: - fieldPath: status.hostIP - name: GEN3_DEBUG GEN3_DEBUG_FLAG|-value: "False"-| livenessProbe: diff --git a/kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml b/kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml index 375f424ed..c82a1a98e 100644 --- a/kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml +++ b/kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml @@ -26,7 +26,6 @@ spec: netnolimit: "yes" public: "yes" userhelper: "yes" - tags.datadoghq.com/service: "presigned-url-fence" GEN3_ENV_LABEL GEN3_FENCE_VERSION GEN3_DATE_LABEL @@ -117,34 +116,6 @@ spec: - name: fence GEN3_FENCE_IMAGE env: - - name: DD_ENABLED - valueFrom: - configMapKeyRef: - name: manifest-global - key: dd_enabled - optional: true - - name: DD_ENV - valueFrom: - fieldRef: - fieldPath: metadata.labels['tags.datadoghq.com/env'] - - name: DD_SERVICE - valueFrom: - fieldRef: - fieldPath: metadata.labels['tags.datadoghq.com/service'] - - name: DD_VERSION - valueFrom: - fieldRef: - fieldPath: metadata.labels['tags.datadoghq.com/version'] - - name: DD_LOGS_INJECTION - value: "true" - - name: DD_PROFILING_ENABLED - value: "true" - - name: DD_TRACE_SAMPLE_RATE - value: "1" - - name: DD_AGENT_HOST - valueFrom: - fieldRef: - fieldPath: status.hostIP - name: NGINX_RATE_LIMIT value: "6" - name: PYTHONPATH diff --git a/kube/services/sheepdog/sheepdog-deploy.yaml b/kube/services/sheepdog/sheepdog-deploy.yaml index 2f476d0f0..fb694cb26 100644 --- a/kube/services/sheepdog/sheepdog-deploy.yaml +++ b/kube/services/sheepdog/sheepdog-deploy.yaml @@ -23,7 +23,6 @@ spec: public: "yes" # to download dictionary s3: "yes" - tags.datadoghq.com/service: "sheepdog" GEN3_ENV_LABEL GEN3_SHEEPDOG_VERSION GEN3_DATE_LABEL @@ -97,34 +96,6 @@ spec: env: - name: GEN3_UWSGI_TIMEOUT value: "600" - - name: DD_ENABLED - valueFrom: - configMapKeyRef: - name: manifest-global - key: dd_enabled - optional: true - - name: DD_ENV - valueFrom: - fieldRef: - fieldPath: metadata.labels['tags.datadoghq.com/env'] - - name: DD_SERVICE - valueFrom: - fieldRef: - fieldPath: metadata.labels['tags.datadoghq.com/service'] - - name: DD_VERSION - valueFrom: - fieldRef: - fieldPath: metadata.labels['tags.datadoghq.com/version'] - - name: DD_LOGS_INJECTION - value: "true" - - name: DD_PROFILING_ENABLED - value: "true" - - name: DD_TRACE_SAMPLE_RATE - value: "1" - - name: DD_AGENT_HOST - valueFrom: - fieldRef: - fieldPath: status.hostIP - name: DICTIONARY_URL valueFrom: configMapKeyRef: diff --git a/kube/services/wts/wts-deploy.yaml b/kube/services/wts/wts-deploy.yaml index c6c4ffe74..b4755c8a0 100644 --- a/kube/services/wts/wts-deploy.yaml +++ b/kube/services/wts/wts-deploy.yaml @@ -25,7 +25,6 @@ spec: public: "yes" netnolimit: "yes" userhelper: "yes" - tags.datadoghq.com/service: "token-service" GEN3_DATE_LABEL GEN3_WTS_VERSION GEN3_ENV_LABEL @@ -77,40 +76,12 @@ spec: value: "/var/www/wts/appcreds.json" - name: AUTH_PLUGINS value: k8s - - name: DD_ENABLED - valueFrom: - configMapKeyRef: - name: manifest-global - key: dd_enabled - optional: true - - name: DD_ENV - valueFrom: - fieldRef: - fieldPath: metadata.labels['tags.datadoghq.com/env'] - - name: DD_SERVICE - valueFrom: - fieldRef: - fieldPath: metadata.labels['tags.datadoghq.com/service'] - - name: DD_VERSION - valueFrom: - fieldRef: - fieldPath: metadata.labels['tags.datadoghq.com/version'] - - name: DD_LOGS_INJECTION - value: "true" - - name: DD_PROFILING_ENABLED - value: "true" - - name: DD_TRACE_SAMPLE_RATE - value: 
"1" - name: GEN3_UWSGI_TIMEOUT valueFrom: configMapKeyRef: name: manifest-global key: uwsgi-timeout optional: true - - name: DD_AGENT_HOST - valueFrom: - fieldRef: - fieldPath: status.hostIP volumeMounts: - name: "wts-secret" readOnly: true From 35f6e4271b51fa5db6ec3490d44e4e38fdccf6b5 Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Wed, 11 Dec 2024 08:52:47 -0700 Subject: [PATCH 3/4] updating the kube-setup-indexd job to apply single table driver settings.py for specific environments. (#2683) --- .../indexd_multi_table/indexd_settings.py | 79 +++++++++++++++++++ gen3/bin/kube-setup-indexd.sh | 11 ++- 2 files changed, 88 insertions(+), 2 deletions(-) create mode 100644 apis_configs/indexd_multi_table/indexd_settings.py diff --git a/apis_configs/indexd_multi_table/indexd_settings.py b/apis_configs/indexd_multi_table/indexd_settings.py new file mode 100644 index 000000000..54419c108 --- /dev/null +++ b/apis_configs/indexd_multi_table/indexd_settings.py @@ -0,0 +1,79 @@ +from indexd.index.drivers.alchemy import SQLAlchemyIndexDriver +from indexd.alias.drivers.alchemy import SQLAlchemyAliasDriver +from indexd.auth.drivers.alchemy import SQLAlchemyAuthDriver +from indexd.index.drivers.single_table_alchemy import SingleTableSQLAlchemyIndexDriver +import config_helper +from os import environ +import json + +APP_NAME = "indexd" + + +def load_json(file_name): + return config_helper.load_json(file_name, APP_NAME) + + +conf_data = load_json("creds.json") + +usr = conf_data.get("db_username", "{{db_username}}") +db = conf_data.get("db_database", "{{db_database}}") +psw = conf_data.get("db_password", "{{db_password}}") +pghost = conf_data.get("db_host", "{{db_host}}") +pgport = 5432 +index_config = conf_data.get("index_config") +CONFIG = {} + +CONFIG["JSONIFY_PRETTYPRINT_REGULAR"] = False + +dist = environ.get("DIST", None) +if dist: + CONFIG["DIST"] = json.loads(dist) + +arborist = environ.get("ARBORIST", "false").lower() == "true" + +USE_SINGLE_TABLE = True + +if USE_SINGLE_TABLE is True: + + + CONFIG["INDEX"] = { + "driver": SingleTableSQLAlchemyIndexDriver( + "postgresql+psycopg2://{usr}:{psw}@{pghost}:{pgport}/{db}".format( + usr=usr, psw=psw, pghost=pghost, pgport=pgport, db=db + ), + index_config=index_config, + ) + } +else: + CONFIG["INDEX"] = { + "driver": SQLAlchemyIndexDriver( + "postgresql+psycopg2://{usr}:{psw}@{pghost}:{pgport}/{db}".format( + usr=usr, psw=psw, pghost=pghost, pgport=pgport, db=db + ), + index_config=index_config, + ) + } + +CONFIG["ALIAS"] = { + "driver": SQLAlchemyAliasDriver( + "postgresql+psycopg2://{usr}:{psw}@{pghost}:{pgport}/{db}".format( + usr=usr, psw=psw, pghost=pghost, pgport=pgport, db=db + ) + ) +} + +if arborist: + AUTH = SQLAlchemyAuthDriver( + "postgresql+psycopg2://{usr}:{psw}@{pghost}:{pgport}/{db}".format( + usr=usr, psw=psw, pghost=pghost, pgport=pgport, db=db + ), + arborist="http://arborist-service/", + ) +else: + AUTH = SQLAlchemyAuthDriver( + "postgresql+psycopg2://{usr}:{psw}@{pghost}:{pgport}/{db}".format( + usr=usr, psw=psw, pghost=pghost, pgport=pgport, db=db + ) + ) + +settings = {"config": CONFIG, "auth": AUTH} \ No newline at end of file diff --git a/gen3/bin/kube-setup-indexd.sh b/gen3/bin/kube-setup-indexd.sh index a2a172758..e7395d1b3 100644 --- a/gen3/bin/kube-setup-indexd.sh +++ b/gen3/bin/kube-setup-indexd.sh @@ -6,6 +6,9 @@ source "${GEN3_HOME}/gen3/lib/utils.sh" gen3_load "gen3/lib/kube-setup-init" +manifestPath=$(g3k_manifest_path) +singleTable="$(jq -r 
".[\"global\"][\"indexd_single_table\"]" < "$manifestPath" | tr '[:upper:]' '[:lower:]')" + [[ -z "$GEN3_ROLL_ALL" ]] && gen3 kube-setup-secrets if [[ ! -f "$(gen3_secrets_folder)/.rendered_indexd_userdb" ]]; then @@ -19,8 +22,12 @@ if [[ ! -f "$(gen3_secrets_folder)/.rendered_indexd_userdb" ]]; then fi g3kubectl delete secrets/indexd-secret > /dev/null 2>&1 || true; -g3kubectl create secret generic indexd-secret --from-file=local_settings.py="${GEN3_HOME}/apis_configs/indexd_settings.py" "--from-file=${GEN3_HOME}/apis_configs/config_helper.py" - +if "$singleTable" = true; then + g3kubectl create secret generic indexd-secret --from-file=local_settings.py="${GEN3_HOME}/apis_configs/indexd_multi_table/indexd_settings.py" "--from-file=${GEN3_HOME}/apis_configs/config_helper.py" +else + g3kubectl create secret generic indexd-secret --from-file=local_settings.py="${GEN3_HOME}/apis_configs/indexd_settings.py" "--from-file=${GEN3_HOME}/apis_configs/config_helper.py" +fi + gen3 roll indexd g3kubectl apply -f "${GEN3_HOME}/kube/services/indexd/indexd-service.yaml" gen3 roll indexd-canary || true From df6e8aca31f8800a0a9d7c962be956b67dbe8539 Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Wed, 11 Dec 2024 10:16:15 -0700 Subject: [PATCH 4/4] IRSA Changes (#2531) * making changes to kube-setup-batch-export, so it can use IRSA instead of AWS keys * updating the logging for this script and removing unneeded variables --------- Co-authored-by: Mingfei Shao <2475897+mfshao@users.noreply.github.com> --- gen3/bin/kube-setup-batch-export.sh | 53 ++++++++++++++++++----------- 1 file changed, 34 insertions(+), 19 deletions(-) diff --git a/gen3/bin/kube-setup-batch-export.sh b/gen3/bin/kube-setup-batch-export.sh index 25b3f5bb0..7861f5024 100644 --- a/gen3/bin/kube-setup-batch-export.sh +++ b/gen3/bin/kube-setup-batch-export.sh @@ -11,30 +11,45 @@ if ! g3kubectl get secrets | grep batch-export-g3auto /dev/null 2>&1; then hostname="$(gen3 api hostname)" ref_hostname=$(echo "$hostname" | sed 's/\./-/g') bucket_name="${ref_hostname}-batch-export-bucket" - aws_user="${ref_hostname}-batch-export-user" - mkdir -p $(gen3_secrets_folder)/g3auto/batch-export - creds_file="$(gen3_secrets_folder)/g3auto/batch-export/config.json" - - gen3_log_info "Creating batch export secret" + sa_name="batch-export-sa" + + gen3_log_info "Creating batch export bucket" if [[ -z "$JENKINS_HOME" ]]; then gen3 s3 create $bucket_name - gen3 awsuser create $aws_user - gen3 s3 attach-bucket-policy $bucket_name --read-write --user-name $aws_user - gen3 secrets sync "aws reources for batch export" - - gen3_log_info "initializing batch-export config.json" - user=$(gen3 secrets decode $aws_user-g3auto awsusercreds.json) - key_id=$(jq -r .id <<< $user) - access_key=$(jq -r .secret <<< $user) - cat - > $creds_file < "export-job-aws-policy.json" < /dev/null 2>&1; then + if ! gen3 iam-serviceaccount -c "${sa_name}" -p ./export-job-aws-policy.json; then + gen3_log_err "Failed to create iam service account" + return 1 + fi + gen3_log_info "created service account 'batch-export-sa' with s3 access" + gen3_log_info "created role name '${role_name}'" + fi + + gen3_log_info "creating batch-export-g3auto configmap" + kubectl create configmap batch-export-g3auto --from-literal=bucket_name="$bucket_name" fi fi